[llvm] r353455 - GlobalISel: Implement narrowScalar for shift main type
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 7 11:37:44 PST 2019
Author: arsenm
Date: Thu Feb 7 11:37:44 2019
New Revision: 353455
URL: http://llvm.org/viewvc/llvm-project?rev=353455&view=rev
Log:
GlobalISel: Implement narrowScalar for shift main type
This is pretty much directly ported from SelectionDAG. Doesn't include
the shift by non-constant but known bits version, since there isn't a
globalisel version of computeKnownBits yet.
This shows a disadvantage of targets not specifying which type
should be used for the shift amount. If type 0 is legalized before
type 1, the operations on the shift amount type use the wider type
(which are also less likely to legalize). This can be avoided by
targets specifying legalization actions on type 1 earlier than for
type 0.
Modified:
llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir
llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir
Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h Thu Feb 7 11:37:44 2019
@@ -175,6 +175,10 @@ private:
LegalizeResult
reduceLoadStoreWidth(MachineInstr &MI, unsigned TypeIdx, LLT NarrowTy);
+ LegalizeResult narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
+ LLT HalfTy, LLT ShiftAmtTy);
+
+ LegalizeResult narrowScalarShift(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarMul(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
LegalizeResult narrowScalarSelect(MachineInstr &MI, unsigned TypeIdx, LLT Ty);
Modified: llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h (original)
+++ llvm/trunk/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h Thu Feb 7 11:37:44 2019
@@ -203,6 +203,7 @@ protected:
void validateTruncExt(const LLT &Dst, const LLT &Src, bool IsExtend);
void validateBinaryOp(const LLT &Res, const LLT &Op0, const LLT &Op1);
+ void validateShiftOp(const LLT &Res, const LLT &Op0, const LLT &Op1);
void validateSelectOp(const LLT &ResTy, const LLT &TstTy, const LLT &Op0Ty,
const LLT &Op1Ty);
@@ -1163,6 +1164,18 @@ public:
return buildInstr(TargetOpcode::G_SHL, {Dst}, {Src0, Src1}, Flags);
}
+ MachineInstrBuilder buildLShr(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_LSHR, {Dst}, {Src0, Src1}, Flags);
+ }
+
+ MachineInstrBuilder buildAShr(const DstOp &Dst, const SrcOp &Src0,
+ const SrcOp &Src1,
+ Optional<unsigned> Flags = None) {
+ return buildInstr(TargetOpcode::G_ASHR, {Dst}, {Src0, Src1}, Flags);
+ }
+
/// Build and insert \p Res = G_AND \p Op0, \p Op1
///
/// G_AND sets \p Res to the bitwise and of integer parameters \p Op0 and \p
Modified: llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/LegalizerHelper.cpp Thu Feb 7 11:37:44 2019
@@ -811,14 +811,8 @@ LegalizerHelper::LegalizeResult Legalize
}
case TargetOpcode::G_SHL:
case TargetOpcode::G_LSHR:
- case TargetOpcode::G_ASHR: {
- if (TypeIdx != 1)
- return UnableToLegalize; // TODO
- Observer.changingInstr(MI);
- narrowScalarSrc(MI, NarrowTy, 2);
- Observer.changedInstr(MI);
- return Legalized;
- }
+ case TargetOpcode::G_ASHR:
+ return narrowScalarShift(MI, TypeIdx, NarrowTy);
case TargetOpcode::G_CTLZ:
case TargetOpcode::G_CTLZ_ZERO_UNDEF:
case TargetOpcode::G_CTTZ:
@@ -2195,6 +2189,221 @@ LegalizerHelper::fewerElementsVector(Mac
}
LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarShiftByConstant(MachineInstr &MI, const APInt &Amt,
+ const LLT HalfTy, const LLT AmtTy) {
+
+ unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
+ unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
+ MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
+
+ if (Amt.isNullValue()) {
+ MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {InL, InH});
+ MI.eraseFromParent();
+ return Legalized;
+ }
+
+ LLT NVT = HalfTy;
+ unsigned NVTBits = HalfTy.getSizeInBits();
+ unsigned VTBits = 2 * NVTBits;
+
+ SrcOp Lo(0), Hi(0);
+ if (MI.getOpcode() == TargetOpcode::G_SHL) {
+ if (Amt.ugt(VTBits)) {
+ Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
+ } else if (Amt.ugt(NVTBits)) {
+ Lo = MIRBuilder.buildConstant(NVT, 0);
+ Hi = MIRBuilder.buildShl(NVT, InL,
+ MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
+ } else if (Amt == NVTBits) {
+ Lo = MIRBuilder.buildConstant(NVT, 0);
+ Hi = InL;
+ } else {
+ Lo = MIRBuilder.buildShl(NVT, InL, MIRBuilder.buildConstant(AmtTy, Amt));
+ Hi = MIRBuilder.buildOr(
+ NVT,
+ MIRBuilder.buildShl(NVT, InH, MIRBuilder.buildConstant(AmtTy, Amt)),
+ MIRBuilder.buildLShr(
+ NVT, InL, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits)));
+ }
+ } else if (MI.getOpcode() == TargetOpcode::G_LSHR) {
+ if (Amt.ugt(VTBits)) {
+ Lo = Hi = MIRBuilder.buildConstant(NVT, 0);
+ } else if (Amt.ugt(NVTBits)) {
+ Lo = MIRBuilder.buildLShr(NVT, InH,
+ MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
+ Hi = MIRBuilder.buildConstant(NVT, 0);
+ } else if (Amt == NVTBits) {
+ Lo = InH;
+ Hi = MIRBuilder.buildConstant(NVT, 0);
+ } else {
+ auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
+
+ auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
+ auto OrRHS = MIRBuilder.buildShl(
+ NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
+
+ Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
+ Hi = MIRBuilder.buildLShr(NVT, InH, ShiftAmtConst);
+ }
+ } else {
+ if (Amt.ugt(VTBits)) {
+ Hi = Lo = MIRBuilder.buildAShr(
+ NVT, InH, MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
+ } else if (Amt.ugt(NVTBits)) {
+ Lo = MIRBuilder.buildAShr(NVT, InH,
+ MIRBuilder.buildConstant(AmtTy, Amt - NVTBits));
+ Hi = MIRBuilder.buildAShr(NVT, InH,
+ MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
+ } else if (Amt == NVTBits) {
+ Lo = InH;
+ Hi = MIRBuilder.buildAShr(NVT, InH,
+ MIRBuilder.buildConstant(AmtTy, NVTBits - 1));
+ } else {
+ auto ShiftAmtConst = MIRBuilder.buildConstant(AmtTy, Amt);
+
+ auto OrLHS = MIRBuilder.buildLShr(NVT, InL, ShiftAmtConst);
+ auto OrRHS = MIRBuilder.buildShl(
+ NVT, InH, MIRBuilder.buildConstant(AmtTy, -Amt + NVTBits));
+
+ Lo = MIRBuilder.buildOr(NVT, OrLHS, OrRHS);
+ Hi = MIRBuilder.buildAShr(NVT, InH, ShiftAmtConst);
+ }
+ }
+
+ MIRBuilder.buildMerge(MI.getOperand(0).getReg(), {Lo.getReg(), Hi.getReg()});
+ MI.eraseFromParent();
+
+ return Legalized;
+}
+
+// TODO: Optimize if constant shift amount.
+LegalizerHelper::LegalizeResult
+LegalizerHelper::narrowScalarShift(MachineInstr &MI, unsigned TypeIdx,
+ LLT RequestedTy) {
+ if (TypeIdx == 1) {
+ Observer.changingInstr(MI);
+ narrowScalarSrc(MI, RequestedTy, 2);
+ Observer.changedInstr(MI);
+ return Legalized;
+ }
+
+ unsigned DstReg = MI.getOperand(0).getReg();
+ LLT DstTy = MRI.getType(DstReg);
+ if (DstTy.isVector())
+ return UnableToLegalize;
+
+ unsigned Amt = MI.getOperand(2).getReg();
+ LLT ShiftAmtTy = MRI.getType(Amt);
+ const unsigned DstEltSize = DstTy.getScalarSizeInBits();
+ if (DstEltSize % 2 != 0)
+ return UnableToLegalize;
+
+ // Ignore the input type. We can only go to exactly half the size of the
+ // input. If that isn't small enough, the resulting pieces will be further
+ // legalized.
+ const unsigned NewBitSize = DstEltSize / 2;
+ const LLT HalfTy = LLT::scalar(NewBitSize);
+ const LLT CondTy = LLT::scalar(1);
+
+ if (const MachineInstr *KShiftAmt =
+ getOpcodeDef(TargetOpcode::G_CONSTANT, Amt, MRI)) {
+ return narrowScalarShiftByConstant(
+ MI, KShiftAmt->getOperand(1).getCImm()->getValue(), HalfTy, ShiftAmtTy);
+ }
+
+ // TODO: Expand with known bits.
+
+ // Handle the fully general expansion by an unknown amount.
+ auto NewBits = MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize);
+
+ unsigned InL = MRI.createGenericVirtualRegister(HalfTy);
+ unsigned InH = MRI.createGenericVirtualRegister(HalfTy);
+ MIRBuilder.buildUnmerge({InL, InH}, MI.getOperand(1).getReg());
+
+ auto AmtExcess = MIRBuilder.buildSub(ShiftAmtTy, Amt, NewBits);
+ auto AmtLack = MIRBuilder.buildSub(ShiftAmtTy, NewBits, Amt);
+
+ auto Zero = MIRBuilder.buildConstant(ShiftAmtTy, 0);
+ auto IsShort = MIRBuilder.buildICmp(ICmpInst::ICMP_ULT, CondTy, Amt, NewBits);
+ auto IsZero = MIRBuilder.buildICmp(ICmpInst::ICMP_EQ, CondTy, Amt, Zero);
+
+ unsigned ResultRegs[2];
+ switch (MI.getOpcode()) {
+ case TargetOpcode::G_SHL: {
+ // Short: ShAmt < NewBitSize
+ auto LoS = MIRBuilder.buildShl(HalfTy, InH, Amt);
+
+ auto OrLHS = MIRBuilder.buildShl(HalfTy, InH, Amt);
+ auto OrRHS = MIRBuilder.buildLShr(HalfTy, InL, AmtLack);
+ auto HiS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
+
+ // Long: ShAmt >= NewBitSize
+ auto LoL = MIRBuilder.buildConstant(HalfTy, 0); // Lo part is zero.
+ auto HiL = MIRBuilder.buildShl(HalfTy, InL, AmtExcess); // Hi from Lo part.
+
+ auto Lo = MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL);
+ auto Hi = MIRBuilder.buildSelect(
+ HalfTy, IsZero, InH, MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL));
+
+ ResultRegs[0] = Lo.getReg(0);
+ ResultRegs[1] = Hi.getReg(0);
+ break;
+ }
+ case TargetOpcode::G_LSHR: {
+ // Short: ShAmt < NewBitSize
+ auto HiS = MIRBuilder.buildLShr(HalfTy, InH, Amt);
+
+ auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
+ auto OrRHS = MIRBuilder.buildShl(HalfTy, InH, AmtLack);
+ auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
+
+ // Long: ShAmt >= NewBitSize
+ auto HiL = MIRBuilder.buildConstant(HalfTy, 0); // Hi part is zero.
+ auto LoL = MIRBuilder.buildLShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
+
+ auto Lo = MIRBuilder.buildSelect(
+ HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
+ auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
+
+ ResultRegs[0] = Lo.getReg(0);
+ ResultRegs[1] = Hi.getReg(0);
+ break;
+ }
+ case TargetOpcode::G_ASHR: {
+ // Short: ShAmt < NewBitSize
+ auto HiS = MIRBuilder.buildAShr(HalfTy, InH, Amt);
+
+ auto OrLHS = MIRBuilder.buildLShr(HalfTy, InL, Amt);
+ auto OrRHS = MIRBuilder.buildLShr(HalfTy, InH, AmtLack);
+ auto LoS = MIRBuilder.buildOr(HalfTy, OrLHS, OrRHS);
+
+ // Long: ShAmt >= NewBitSize
+
+ // Sign of Hi part.
+ auto HiL = MIRBuilder.buildAShr(
+ HalfTy, InH, MIRBuilder.buildConstant(ShiftAmtTy, NewBitSize - 1));
+
+ auto LoL = MIRBuilder.buildAShr(HalfTy, InH, AmtExcess); // Lo from Hi part.
+
+ auto Lo = MIRBuilder.buildSelect(
+ HalfTy, IsZero, InL, MIRBuilder.buildSelect(HalfTy, IsShort, LoS, LoL));
+
+ auto Hi = MIRBuilder.buildSelect(HalfTy, IsShort, HiS, HiL);
+
+ ResultRegs[0] = Lo.getReg(0);
+ ResultRegs[1] = Hi.getReg(0);
+ break;
+ }
+ default:
+ llvm_unreachable("not a shift");
+ }
+
+ MIRBuilder.buildMerge(DstReg, ResultRegs);
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
LegalizerHelper::narrowScalarMul(MachineInstr &MI, unsigned TypeIdx, LLT NewTy) {
unsigned DstReg = MI.getOperand(0).getReg();
unsigned Src0 = MI.getOperand(1).getReg();
Modified: llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp Thu Feb 7 11:37:44 2019
@@ -185,6 +185,12 @@ void MachineIRBuilder::validateBinaryOp(
assert((Res == Op0 && Res == Op1) && "type mismatch");
}
+void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
+ const LLT &Op1) {
+ assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
+ assert((Res == Op0) && "type mismatch");
+}
+
MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
unsigned Op1) {
assert(getMRI()->getType(Res).isPointer() &&
@@ -852,11 +858,8 @@ MachineInstrBuilder MachineIRBuilder::bu
}
case TargetOpcode::G_ADD:
case TargetOpcode::G_AND:
- case TargetOpcode::G_ASHR:
- case TargetOpcode::G_LSHR:
case TargetOpcode::G_MUL:
case TargetOpcode::G_OR:
- case TargetOpcode::G_SHL:
case TargetOpcode::G_SUB:
case TargetOpcode::G_XOR:
case TargetOpcode::G_UDIV:
@@ -870,6 +873,17 @@ MachineInstrBuilder MachineIRBuilder::bu
SrcOps[0].getLLTTy(*getMRI()),
SrcOps[1].getLLTTy(*getMRI()));
break;
+ }
+ case TargetOpcode::G_SHL:
+ case TargetOpcode::G_ASHR:
+ case TargetOpcode::G_LSHR: {
+ assert(DstOps.size() == 1 && "Invalid Dst");
+ assert(SrcOps.size() == 2 && "Invalid Srcs");
+ validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
+ SrcOps[0].getLLTTy(*getMRI()),
+ SrcOps[1].getLLTTy(*getMRI()));
+ break;
+ }
case TargetOpcode::G_SEXT:
case TargetOpcode::G_ZEXT:
case TargetOpcode::G_ANYEXT:
@@ -879,7 +893,7 @@ MachineInstrBuilder MachineIRBuilder::bu
SrcOps[0].getLLTTy(*getMRI()), true);
break;
case TargetOpcode::G_TRUNC:
- case TargetOpcode::G_FPTRUNC:
+ case TargetOpcode::G_FPTRUNC: {
assert(DstOps.size() == 1 && "Invalid Dst");
assert(SrcOps.size() == 1 && "Invalid Srcs");
validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
Modified: llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64LegalizerInfo.cpp Thu Feb 7 11:37:44 2019
@@ -85,6 +85,7 @@ AArch64LegalizerInfo::AArch64LegalizerIn
getActionDefinitionsBuilder(G_SHL)
.legalFor({{s32, s32}, {s64, s64},
{v2s32, v2s32}, {v4s32, v4s32}, {v2s64, v2s64}})
+ .clampScalar(1, s32, s64)
.clampScalar(0, s32, s64)
.widenScalarToNextPow2(0)
.clampNumElements(0, v2s32, v4s32)
@@ -105,6 +106,7 @@ AArch64LegalizerInfo::AArch64LegalizerIn
getActionDefinitionsBuilder({G_LSHR, G_ASHR})
.legalFor({{s32, s32}, {s64, s64}})
+ .clampScalar(1, s32, s64)
.clampScalar(0, s32, s64)
.minScalarSameAs(1, 0);
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp Thu Feb 7 11:37:44 2019
@@ -439,11 +439,17 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo
.clampMaxNumElements(0, S16, 2);
} else
Shifts.legalFor({{S16, S32}, {S16, S16}});
+
+ Shifts.clampScalar(1, S16, S32);
Shifts.clampScalar(0, S16, S64);
- } else
+ } else {
+ // Make sure we legalize the shift amount type first, as the general
+ // expansion for the shifted type will produce much worse code if it hasn't
+ // been truncated already.
+ Shifts.clampScalar(1, S32, S32);
Shifts.clampScalar(0, S32, S64);
- Shifts.clampScalar(1, S32, S32)
- .scalarize(0);
+ }
+ Shifts.scalarize(0);
for (unsigned Op : {G_EXTRACT_VECTOR_ELT, G_INSERT_VECTOR_ELT}) {
unsigned VecTypeIdx = Op == G_EXTRACT_VECTOR_ELT ? 1 : 0;
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-merge-values.mir Thu Feb 7 11:37:44 2019
@@ -8,13 +8,13 @@ body: |
; CHECK-LABEL: name: test_merge_s4
; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
- ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
- ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
- ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C2]]
- ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
- ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32)
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C2]]
+ ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
+ ; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C3]]
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[AND]](s32)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 15
; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[C]](s64)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C4]]
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-shift.mir Thu Feb 7 11:37:44 2019
@@ -7,30 +7,30 @@ body: |
; CHECK-LABEL: name: test_shift
; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x0
; CHECK: [[COPY1:%[0-9]+]]:_(s64) = COPY $x1
- ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC]], [[C]](s32)
- ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
- ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC1]], [[C1]]
+ ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[TRUNC]], [[C]]
+ ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[TRUNC1]], [[C1]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
; CHECK: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
; CHECK: $w0 = COPY [[COPY2]](s32)
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[TRUNC2]], [[C2]]
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[TRUNC3]], [[C3]]
- ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[AND2]](s32)
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND2]], [[AND1]](s32)
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; CHECK: $w0 = COPY [[COPY3]](s32)
- ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
- ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC5]], [[C4]]
- ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC4]], [[AND3]](s32)
+ ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[COPY1]](s64)
+ ; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[TRUNC4]], [[C4]]
+ ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[TRUNC5]], [[AND3]](s32)
; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
; CHECK: $w0 = COPY [[COPY4]](s32)
%0:_(s64) = COPY $x0
@@ -96,3 +96,109 @@ body: |
$x0 = COPY %2(s64)
...
+
+---
+name: test_shl_s128_s128
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_shl_s128_s128
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $q1
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+ ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[TRUNC]], [[C]]
+ ; CHECK: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
+ ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
+ ; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s64)
+ ; CHECK: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s64)
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s64)
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s64)
+ ; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[SHL]], [[C2]]
+ ; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[SHL2]]
+ ; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV1]], [[SELECT1]]
+ ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; CHECK: $q0 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $q0
+ %1:_(s128) = COPY $q1
+ %2:_(s128) = G_SHL %0, %1
+ $q0 = COPY %2
+
+...
+
+---
+name: test_lshr_s128_s128
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_lshr_s128_s128
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $q1
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+ ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[TRUNC]], [[C]]
+ ; CHECK: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
+ ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
+ ; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s64)
+ ; CHECK: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s64)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s64)
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s64)
+ ; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[LSHR2]]
+ ; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV]], [[SELECT]]
+ ; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[LSHR]], [[C2]]
+ ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; CHECK: $q0 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $q0
+ %1:_(s128) = COPY $q1
+ %2:_(s128) = G_LSHR %0, %1
+ $q0 = COPY %2
+
+...
+
+---
+name: test_ashr_s128_s128
+body: |
+ bb.0:
+ ; CHECK-LABEL: name: test_ashr_s128_s128
+ ; CHECK: [[COPY:%[0-9]+]]:_(s128) = COPY $q0
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s128) = COPY $q1
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s64) = G_TRUNC [[COPY1]](s128)
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
+ ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; CHECK: [[SUB:%[0-9]+]]:_(s64) = G_SUB [[TRUNC]], [[C]]
+ ; CHECK: [[SUB1:%[0-9]+]]:_(s64) = G_SUB [[C]], [[TRUNC]]
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s32) = G_ICMP intpred(ult), [[TRUNC]](s64), [[C]]
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP]](s32)
+ ; CHECK: [[ICMP1:%[0-9]+]]:_(s32) = G_ICMP intpred(eq), [[TRUNC]](s64), [[C1]]
+ ; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[ICMP1]](s32)
+ ; CHECK: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s64)
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s64)
+ ; CHECK: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s64)
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 63
+ ; CHECK: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s64)
+ ; CHECK: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s64)
+ ; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[OR]], [[ASHR2]]
+ ; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC2]](s1), [[UV]], [[SELECT]]
+ ; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[TRUNC1]](s1), [[ASHR]], [[ASHR1]]
+ ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; CHECK: $q0 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $q0
+ %1:_(s128) = COPY $q1
+ %2:_(s128) = G_ASHR %0, %1
+ $q0 = COPY %2
+
+...
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/legalize-unmerge-values.mir Thu Feb 7 11:37:44 2019
@@ -13,8 +13,8 @@ body: |
; CHECK: [[ZEXT:%[0-9]+]]:_(s16) = G_ZEXT [[TRUNC]](s8)
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
; CHECK: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[C1]](s32)
- ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s16)
; CHECK: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[TRUNC1]](s16)
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s16)
; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT]], [[ZEXT1]](s32)
; CHECK: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[ZEXT]](s16)
Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-ashr.mir Thu Feb 7 11:37:44 2019
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck -check-prefix=SI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck -check-prefix=VI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck -check-prefix=GFX9 %s
---
name: test_ashr_s32_s32
@@ -101,18 +101,14 @@ body: |
; VI-LABEL: name: test_ashr_s64_s16
; VI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[AND]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[TRUNC]](s16)
; VI: $vgpr0_vgpr1 = COPY [[ASHR]](s64)
; GFX9-LABEL: name: test_ashr_s64_s16
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[AND]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[COPY]], [[TRUNC]](s16)
; GFX9: $vgpr0_vgpr1 = COPY [[ASHR]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
@@ -168,13 +164,13 @@ body: |
; SI-LABEL: name: test_ashr_s16_s16
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
- ; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
- ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
+ ; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
@@ -212,13 +208,13 @@ body: |
; SI-LABEL: name: test_ashr_s16_i8
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
- ; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
- ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
+ ; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
@@ -228,8 +224,10 @@ body: |
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; VI: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[TRUNC]], [[AND]](s32)
+ ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; VI: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[TRUNC]], [[TRUNC1]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
; VI: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: test_ashr_s16_i8
@@ -238,8 +236,10 @@ body: |
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[TRUNC]], [[AND]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[TRUNC]], [[TRUNC1]](s16)
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -260,42 +260,46 @@ body: |
; SI-LABEL: name: test_ashr_i8_i8
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
- ; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C]](s32)
- ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C1]](s32)
+ ; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SHL]], [[C1]](s32)
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ASHR]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
; VI-LABEL: name: test_ashr_i8_i8
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
- ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC]](s16)
- ; VI: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC]](s16)
- ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]]
- ; VI: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ASHR]], [[AND]](s32)
+ ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[C1]](s32)
+ ; VI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC1]](s16)
+ ; VI: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC1]](s16)
+ ; VI: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ASHR]], [[TRUNC]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16)
; VI: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: test_ashr_i8_i8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[C]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC]](s16)
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC]](s16)
- ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C1]]
- ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ASHR]], [[AND]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[C1]](s32)
+ ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[TRUNC1]](s16)
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SHL]], [[TRUNC1]](s16)
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ASHR]], [[TRUNC]](s16)
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR1]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -492,12 +496,12 @@ body: |
; SI: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
; SI: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; SI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
+ ; SI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s16)
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; SI: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
+ ; SI: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s16)
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SEXT1]], [[ZEXT1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -578,16 +582,16 @@ body: |
; SI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[COPY1]](<4 x s16>), 0
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[EXTRACT]](<3 x s16>)
; SI: [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[EXTRACT1]](<3 x s16>)
- ; SI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
+ ; SI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s16)
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; SI: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
+ ; SI: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s16)
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SEXT1]], [[ZEXT1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
- ; SI: [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT [[UV2]](s16)
; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
+ ; SI: [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT [[UV2]](s16)
; SI: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SEXT2]], [[ZEXT2]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR2]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16)
@@ -646,20 +650,20 @@ body: |
; SI: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
; SI: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; SI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
+ ; SI: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s16)
; SI: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; SI: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
+ ; SI: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s16)
; SI: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SEXT1]], [[ZEXT1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR1]](s32)
- ; SI: [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT [[UV2]](s16)
; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV6]](s16)
+ ; SI: [[SEXT2:%[0-9]+]]:_(s32) = G_SEXT [[UV2]](s16)
; SI: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SEXT2]], [[ZEXT2]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR2]](s32)
- ; SI: [[SEXT3:%[0-9]+]]:_(s32) = G_SEXT [[UV3]](s16)
; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV7]](s16)
+ ; SI: [[SEXT3:%[0-9]+]]:_(s32) = G_SEXT [[UV3]](s16)
; SI: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SEXT3]], [[ZEXT3]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR3]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[TRUNC3]](s16)
@@ -689,3 +693,855 @@ body: |
%2:_(<4 x s16>) = G_ASHR %0, %1
$vgpr0_vgpr1 = COPY %2
...
+
+---
+name: test_ashr_s128_s128
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s128
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; SI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s32)
+ ; SI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s128
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; VI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s32)
+ ; VI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s128
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; GFX9: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s32)
+ ; GFX9: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = COPY $vgpr4
+ %2:_(s128) = G_ZEXT %1
+ %3:_(s128) = G_ASHR %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_ashr_s128_s32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s32
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[COPY1]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[COPY1]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s32)
+ ; SI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s32
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[COPY1]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[COPY1]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s32)
+ ; VI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s32
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[COPY1]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[COPY1]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C2]](s32)
+ ; GFX9: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = COPY $vgpr4
+ %2:_(s128) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
+...
+
+---
+name: test_ashr_s128_s32_0
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s32_0
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s32_0
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s32_0
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 0
+ %3:_(s128) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+
+---
+name: test_ashr_s128_s32_23
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s32_23
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s32_23
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s32_23
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 23
+ %3:_(s128) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_ashr_s128_s32_31
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s32_31
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s32_31
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s32_31
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 31
+ %3:_(s128) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_ashr_s128_s32_32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s32_32
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s32_32
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s32_32
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 32
+ %3:_(s128) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_ashr_s128_s32_33
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s32_33
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s32_33
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s32_33
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[ASHR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 33
+ %3:_(s128) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_ashr_s128_s32_127
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_ashr_s128_s32_127
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C1]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR]](s64), [[ASHR1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_ashr_s128_s32_127
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C1]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR]](s64), [[ASHR1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_ashr_s128_s32_127
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV1]], [[C1]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[ASHR]](s64), [[ASHR1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 127
+ %3:_(s128) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_ashr_s256_s256
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8
+
+ ; SI-LABEL: name: test_ashr_s256_s256
+ ; SI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; SI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[TRUNC]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[SUB3]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[C4]](s32)
+ ; SI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[SUB2]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[ASHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV2]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[ASHR]], [[ASHR1]]
+ ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
+ ; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
+ ; SI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL]]
+ ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB4]](s32)
+ ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[LSHR4]]
+ ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV4]], [[SELECT3]]
+ ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[LSHR2]], [[C7]]
+ ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; SI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; SI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; SI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB1]](s32)
+ ; SI: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB1]](s32)
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB7]](s32)
+ ; SI: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR6]], [[SHL1]]
+ ; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB6]](s32)
+ ; SI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[LSHR7]]
+ ; SI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV6]], [[SELECT6]]
+ ; SI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[LSHR5]], [[C10]]
+ ; SI: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT4]], [[SELECT7]]
+ ; SI: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; SI: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[UV9]], [[C11]](s32)
+ ; SI: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[UV9]], [[C12]](s32)
+ ; SI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; SI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; SI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; SI: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB]](s32)
+ ; SI: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV10]], [[SUB]](s32)
+ ; SI: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[UV11]], [[SUB9]](s32)
+ ; SI: [[OR5:%[0-9]+]]:_(s64) = G_OR [[LSHR8]], [[LSHR9]]
+ ; SI: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR6:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[C15]](s32)
+ ; SI: [[ASHR7:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB8]](s32)
+ ; SI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[ASHR7]]
+ ; SI: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV10]], [[SELECT9]]
+ ; SI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[ASHR5]], [[ASHR6]]
+ ; SI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT10]]
+ ; SI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; SI: [[UV12:%[0-9]+]]:_(s64), [[UV13:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV12]], [[SELECT12]]
+ ; SI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV13]], [[SELECT13]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+ ; SI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[ASHR3]]
+ ; SI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[ASHR4]]
+ ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; SI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ ; VI-LABEL: name: test_ashr_s256_s256
+ ; VI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; VI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[TRUNC]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[SUB3]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[C4]](s32)
+ ; VI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[SUB2]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[ASHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV2]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[ASHR]], [[ASHR1]]
+ ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
+ ; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
+ ; VI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL]]
+ ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB4]](s32)
+ ; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[LSHR4]]
+ ; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV4]], [[SELECT3]]
+ ; VI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[LSHR2]], [[C7]]
+ ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; VI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; VI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; VI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB1]](s32)
+ ; VI: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB1]](s32)
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB7]](s32)
+ ; VI: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR6]], [[SHL1]]
+ ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB6]](s32)
+ ; VI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[LSHR7]]
+ ; VI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV6]], [[SELECT6]]
+ ; VI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[LSHR5]], [[C10]]
+ ; VI: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT4]], [[SELECT7]]
+ ; VI: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; VI: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[UV9]], [[C11]](s32)
+ ; VI: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[UV9]], [[C12]](s32)
+ ; VI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; VI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; VI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; VI: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB]](s32)
+ ; VI: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV10]], [[SUB]](s32)
+ ; VI: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[UV11]], [[SUB9]](s32)
+ ; VI: [[OR5:%[0-9]+]]:_(s64) = G_OR [[LSHR8]], [[LSHR9]]
+ ; VI: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR6:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[C15]](s32)
+ ; VI: [[ASHR7:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB8]](s32)
+ ; VI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[ASHR7]]
+ ; VI: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV10]], [[SELECT9]]
+ ; VI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[ASHR5]], [[ASHR6]]
+ ; VI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT10]]
+ ; VI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; VI: [[UV12:%[0-9]+]]:_(s64), [[UV13:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV12]], [[SELECT12]]
+ ; VI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV13]], [[SELECT13]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+ ; VI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[ASHR3]]
+ ; VI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[ASHR4]]
+ ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; VI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ ; GFX9-LABEL: name: test_ashr_s256_s256
+ ; GFX9: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; GFX9: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[SUB3]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[C4]](s32)
+ ; GFX9: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV3]], [[SUB2]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[ASHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV2]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[ASHR]], [[ASHR1]]
+ ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
+ ; GFX9: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL]]
+ ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB4]](s32)
+ ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[LSHR4]]
+ ; GFX9: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV4]], [[SELECT3]]
+ ; GFX9: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[LSHR2]], [[C7]]
+ ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; GFX9: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB1]](s32)
+ ; GFX9: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB1]](s32)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB7]](s32)
+ ; GFX9: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR6]], [[SHL1]]
+ ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB6]](s32)
+ ; GFX9: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[LSHR7]]
+ ; GFX9: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV6]], [[SELECT6]]
+ ; GFX9: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[LSHR5]], [[C10]]
+ ; GFX9: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT4]], [[SELECT7]]
+ ; GFX9: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; GFX9: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[UV9]], [[C11]](s32)
+ ; GFX9: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[UV9]], [[C12]](s32)
+ ; GFX9: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; GFX9: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; GFX9: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; GFX9: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB]](s32)
+ ; GFX9: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV10]], [[SUB]](s32)
+ ; GFX9: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[UV11]], [[SUB9]](s32)
+ ; GFX9: [[OR5:%[0-9]+]]:_(s64) = G_OR [[LSHR8]], [[LSHR9]]
+ ; GFX9: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR6:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[C15]](s32)
+ ; GFX9: [[ASHR7:%[0-9]+]]:_(s64) = G_ASHR [[UV11]], [[SUB8]](s32)
+ ; GFX9: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[ASHR7]]
+ ; GFX9: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV10]], [[SELECT9]]
+ ; GFX9: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[ASHR5]], [[ASHR6]]
+ ; GFX9: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT10]]
+ ; GFX9: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; GFX9: [[UV12:%[0-9]+]]:_(s64), [[UV13:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV12]], [[SELECT12]]
+ ; GFX9: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV13]], [[SELECT13]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+ ; GFX9: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[ASHR3]]
+ ; GFX9: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[ASHR4]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; GFX9: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ %0:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ %1:_(s32) = COPY $vgpr8
+ %2:_(s256) = G_ZEXT %1
+ %3:_(s256) = G_ASHR %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
+...
+
+---
+name: test_ashr_v2s128_v2s32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, $vgpr4_vgpr5
+
+ ; SI-LABEL: name: test_ashr_v2s128_v2s32
+ ; SI: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; SI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[UV2]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[UV2]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[C2]](s32)
+ ; SI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV4]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; SI: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[UV3]](s32)
+ ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[UV3]](s32)
+ ; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB3]](s32)
+ ; SI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[LSHR3]]
+ ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[C5]](s32)
+ ; SI: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[SUB2]](s32)
+ ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[ASHR5]]
+ ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV6]], [[SELECT3]]
+ ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[ASHR3]], [[ASHR4]]
+ ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT4]](s64), [[SELECT5]](s64)
+ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ ; VI-LABEL: name: test_ashr_v2s128_v2s32
+ ; VI: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; VI: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[UV2]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[UV2]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[C2]](s32)
+ ; VI: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV4]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; VI: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[UV3]](s32)
+ ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[UV3]](s32)
+ ; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB3]](s32)
+ ; VI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[LSHR3]]
+ ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[C5]](s32)
+ ; VI: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[SUB2]](s32)
+ ; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[ASHR5]]
+ ; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV6]], [[SELECT3]]
+ ; VI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[ASHR3]], [[ASHR4]]
+ ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT4]](s64), [[SELECT5]](s64)
+ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ ; GFX9-LABEL: name: test_ashr_v2s128_v2s32
+ ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[UV2]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[UV2]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[LSHR1]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[C2]](s32)
+ ; GFX9: [[ASHR2:%[0-9]+]]:_(s64) = G_ASHR [[UV5]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[ASHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV4]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[ASHR]], [[ASHR1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; GFX9: [[ASHR3:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[UV3]](s32)
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[UV3]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB3]](s32)
+ ; GFX9: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR2]], [[LSHR3]]
+ ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR4:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[C5]](s32)
+ ; GFX9: [[ASHR5:%[0-9]+]]:_(s64) = G_ASHR [[UV7]], [[SUB2]](s32)
+ ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[ASHR5]]
+ ; GFX9: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV6]], [[SELECT3]]
+ ; GFX9: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[ASHR3]], [[ASHR4]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT4]](s64), [[SELECT5]](s64)
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ %0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ %1:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ %2:_(<2 x s128>) = G_ASHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+...
Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-lshr.mir Thu Feb 7 11:37:44 2019
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck -check-prefix=SI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck -check-prefix=VI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer -global-isel-abort=0 -o - %s | FileCheck -check-prefix=GFX9 %s
---
name: test_lshr_s32_s32
@@ -101,18 +101,14 @@ body: |
; VI-LABEL: name: test_lshr_s64_s16
; VI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[AND]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[TRUNC]](s16)
; VI: $vgpr0_vgpr1 = COPY [[LSHR]](s64)
; GFX9-LABEL: name: test_lshr_s64_s16
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[AND]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[COPY]], [[TRUNC]](s16)
; GFX9: $vgpr0_vgpr1 = COPY [[LSHR]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
@@ -168,12 +164,12 @@ body: |
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
- ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[AND1]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
; VI-LABEL: name: test_lshr_s16_s16
@@ -210,13 +206,13 @@ body: |
; SI-LABEL: name: test_lshr_s16_i8
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
- ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[AND1]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
; VI-LABEL: name: test_lshr_s16_i8
@@ -225,8 +221,10 @@ body: |
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[AND]](s32)
+ ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[TRUNC1]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: test_lshr_s16_i8
@@ -235,8 +233,10 @@ body: |
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[AND]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[TRUNC1]](s16)
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -258,40 +258,44 @@ body: |
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; SI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C1]]
- ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND]], [[AND1]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[AND1]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
; VI-LABEL: name: test_lshr_i8_i8
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
- ; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[AND1]](s32)
+ ; VI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; VI: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+ ; VI: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[COPY5]]
+ ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND1]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[TRUNC]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; VI: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: test_lshr_i8_i8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C1]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC]], [[AND1]](s32)
+ ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+ ; GFX9: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[COPY5]]
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND1]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[TRUNC1]], [[TRUNC]](s16)
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -488,13 +492,13 @@ body: |
; SI: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
; SI: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV]](s16)
- ; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
- ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[ZEXT1]](s32)
+ ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
+ ; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV]](s16)
+ ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV1]](s16)
- ; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
- ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT2]], [[ZEXT3]](s32)
+ ; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
+ ; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV1]](s16)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT3]], [[ZEXT2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
; SI: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
@@ -574,17 +578,17 @@ body: |
; SI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[COPY1]](<4 x s16>), 0
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[EXTRACT]](<3 x s16>)
; SI: [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[EXTRACT1]](<3 x s16>)
- ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV]](s16)
- ; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
- ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[ZEXT1]](s32)
+ ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
+ ; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV]](s16)
+ ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV1]](s16)
- ; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
- ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT2]], [[ZEXT3]](s32)
+ ; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
+ ; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV1]](s16)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT3]], [[ZEXT2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; SI: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
- ; SI: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
- ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT4]], [[ZEXT5]](s32)
+ ; SI: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
+ ; SI: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
+ ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT5]], [[ZEXT4]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16)
; SI: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
@@ -642,21 +646,21 @@ body: |
; SI: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
; SI: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV]](s16)
- ; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
- ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT]], [[ZEXT1]](s32)
+ ; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
+ ; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV]](s16)
+ ; SI: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV1]](s16)
- ; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
- ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT2]], [[ZEXT3]](s32)
+ ; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
+ ; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV1]](s16)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT3]], [[ZEXT2]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
- ; SI: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
- ; SI: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UV6]](s16)
- ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT4]], [[ZEXT5]](s32)
+ ; SI: [[ZEXT4:%[0-9]+]]:_(s32) = G_ZEXT [[UV6]](s16)
+ ; SI: [[ZEXT5:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
+ ; SI: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT5]], [[ZEXT4]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
- ; SI: [[ZEXT6:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
- ; SI: [[ZEXT7:%[0-9]+]]:_(s32) = G_ZEXT [[UV7]](s16)
- ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT6]], [[ZEXT7]](s32)
+ ; SI: [[ZEXT6:%[0-9]+]]:_(s32) = G_ZEXT [[UV7]](s16)
+ ; SI: [[ZEXT7:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
+ ; SI: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT7]], [[ZEXT6]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[TRUNC3]](s16)
; SI: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
@@ -685,3 +689,825 @@ body: |
%2:_(<4 x s16>) = G_LSHR %0, %1
$vgpr0_vgpr1 = COPY %2
...
+
+---
+name: test_lshr_s128_s128
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s128
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; SI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s128
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; VI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s128
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; GFX9: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[TRUNC]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = COPY $vgpr4
+ %2:_(s128) = G_ZEXT %1
+ %3:_(s128) = G_LSHR %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_lshr_s128_s132
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s132
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[COPY1]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[COPY1]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s132
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[COPY1]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[COPY1]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s132
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[COPY1]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[COPY1]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = COPY $vgpr4
+ %2:_(s128) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
+...
+
+---
+name: test_lshr_s128_s32_0
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s32_0
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s32_0
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s32_0
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 0
+ %3:_(s128) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+
+---
+name: test_lshr_s128_s32_23
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s32_23
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s32_23
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s32_23
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 23
+ %3:_(s128) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_lshr_s128_s32_31
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s32_31
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s32_31
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s32_31
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 31
+ %3:_(s128) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_lshr_s128_s32_32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s32_32
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s32_32
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s32_32
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 32
+ %3:_(s128) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_lshr_s128_s32_33
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s32_33
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s32_33
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s32_33
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR]], [[SHL]]
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR]](s64), [[LSHR1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 33
+ %3:_(s128) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_lshr_s128_s32_127
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_lshr_s128_s32_127
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LSHR]](s64), [[C1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_lshr_s128_s32_127
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LSHR]](s64), [[C1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_lshr_s128_s32_127
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV1]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[LSHR]](s64), [[C1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 127
+ %3:_(s128) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_lshr_s256_s256
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8
+
+ ; SI-LABEL: name: test_lshr_s256_s256
+ ; SI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; SI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[TRUNC]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[SUB2]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[LSHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV2]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[LSHR]], [[C4]]
+ ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
+ ; SI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
+ ; SI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR4]], [[SHL1]]
+ ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB4]](s32)
+ ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[LSHR5]]
+ ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV4]], [[SELECT3]]
+ ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[LSHR3]], [[C7]]
+ ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; SI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; SI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
+ ; SI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
+ ; SI: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
+ ; SI: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL3]], [[LSHR6]]
+ ; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB6]](s32)
+ ; SI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[SHL2]], [[C10]]
+ ; SI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[SHL4]]
+ ; SI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV7]], [[SELECT7]]
+ ; SI: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT4]], [[SELECT6]]
+ ; SI: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; SI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; SI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; SI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; SI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB]](s32)
+ ; SI: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB]](s32)
+ ; SI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB9]](s32)
+ ; SI: [[OR5:%[0-9]+]]:_(s64) = G_OR [[LSHR8]], [[SHL5]]
+ ; SI: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB8]](s32)
+ ; SI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[LSHR9]]
+ ; SI: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV8]], [[SELECT9]]
+ ; SI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[LSHR7]], [[C15]]
+ ; SI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT10]]
+ ; SI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; SI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT12]]
+ ; SI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT13]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+ ; SI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[C11]]
+ ; SI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C12]]
+ ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; SI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ ; VI-LABEL: name: test_lshr_s256_s256
+ ; VI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; VI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[TRUNC]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[SUB2]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[LSHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV2]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[LSHR]], [[C4]]
+ ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
+ ; VI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
+ ; VI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR4]], [[SHL1]]
+ ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB4]](s32)
+ ; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[LSHR5]]
+ ; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV4]], [[SELECT3]]
+ ; VI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[LSHR3]], [[C7]]
+ ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; VI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; VI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
+ ; VI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
+ ; VI: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
+ ; VI: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL3]], [[LSHR6]]
+ ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB6]](s32)
+ ; VI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[SHL2]], [[C10]]
+ ; VI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[SHL4]]
+ ; VI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV7]], [[SELECT7]]
+ ; VI: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT4]], [[SELECT6]]
+ ; VI: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; VI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; VI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; VI: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB]](s32)
+ ; VI: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB]](s32)
+ ; VI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB9]](s32)
+ ; VI: [[OR5:%[0-9]+]]:_(s64) = G_OR [[LSHR8]], [[SHL5]]
+ ; VI: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB8]](s32)
+ ; VI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[LSHR9]]
+ ; VI: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV8]], [[SELECT9]]
+ ; VI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[LSHR7]], [[C15]]
+ ; VI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT10]]
+ ; VI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; VI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT12]]
+ ; VI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT13]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+ ; VI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[C11]]
+ ; VI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C12]]
+ ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; VI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ ; GFX9-LABEL: name: test_lshr_s256_s256
+ ; GFX9: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; GFX9: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[TRUNC]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[SUB3]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV3]], [[SUB2]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[LSHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV2]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[LSHR]], [[C4]]
+ ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[TRUNC]](s32)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB5]](s32)
+ ; GFX9: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR4]], [[SHL1]]
+ ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB4]](s32)
+ ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[LSHR5]]
+ ; GFX9: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV4]], [[SELECT3]]
+ ; GFX9: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[LSHR3]], [[C7]]
+ ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; GFX9: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB1]](s32)
+ ; GFX9: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB7]](s32)
+ ; GFX9: [[OR2:%[0-9]+]]:_(s64) = G_OR [[SHL3]], [[LSHR6]]
+ ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB6]](s32)
+ ; GFX9: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[SHL2]], [[C10]]
+ ; GFX9: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[SHL4]]
+ ; GFX9: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV7]], [[SELECT7]]
+ ; GFX9: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT4]], [[SELECT6]]
+ ; GFX9: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; GFX9: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; GFX9: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; GFX9: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; GFX9: [[LSHR7:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB]](s32)
+ ; GFX9: [[LSHR8:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB]](s32)
+ ; GFX9: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB9]](s32)
+ ; GFX9: [[OR5:%[0-9]+]]:_(s64) = G_OR [[LSHR8]], [[SHL5]]
+ ; GFX9: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR9:%[0-9]+]]:_(s64) = G_LSHR [[UV9]], [[SUB8]](s32)
+ ; GFX9: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[LSHR9]]
+ ; GFX9: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV8]], [[SELECT9]]
+ ; GFX9: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[LSHR7]], [[C15]]
+ ; GFX9: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT10]]
+ ; GFX9: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; GFX9: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT12]]
+ ; GFX9: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT13]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT14]](s64), [[SELECT15]](s64)
+ ; GFX9: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT1]], [[C11]]
+ ; GFX9: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C12]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; GFX9: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ %0:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ %1:_(s32) = COPY $vgpr8
+ %2:_(s256) = G_ZEXT %1
+ %3:_(s256) = G_LSHR %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
+...
+
+---
+name: test_lshr_v2s128_v2s32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, $vgpr4_vgpr5
+
+ ; SI-LABEL: name: test_lshr_v2s128_v2s32
+ ; SI: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[UV2]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[UV2]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV4]], [[SELECT]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[UV3]](s32)
+ ; SI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[UV3]](s32)
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB3]](s32)
+ ; SI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR4]], [[SHL1]]
+ ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB2]](s32)
+ ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[LSHR5]]
+ ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV6]], [[SELECT3]]
+ ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[LSHR3]], [[C5]]
+ ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT4]](s64), [[SELECT5]](s64)
+ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ ; VI-LABEL: name: test_lshr_v2s128_v2s32
+ ; VI: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[UV2]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[UV2]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV4]], [[SELECT]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[UV3]](s32)
+ ; VI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[UV3]](s32)
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB3]](s32)
+ ; VI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR4]], [[SHL1]]
+ ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB2]](s32)
+ ; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[LSHR5]]
+ ; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV6]], [[SELECT3]]
+ ; VI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[LSHR3]], [[C5]]
+ ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT4]](s64), [[SELECT5]](s64)
+ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ ; GFX9-LABEL: name: test_lshr_v2s128_v2s32
+ ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[UV2]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[UV2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[LSHR1]], [[SHL]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV5]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[LSHR2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV4]], [[SELECT]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[LSHR]], [[C2]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT1]](s64), [[SELECT2]](s64)
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[UV3]](s32)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[UV3]](s32)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB3]](s32)
+ ; GFX9: [[OR1:%[0-9]+]]:_(s64) = G_OR [[LSHR4]], [[SHL1]]
+ ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB2]](s32)
+ ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[LSHR5]]
+ ; GFX9: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV6]], [[SELECT3]]
+ ; GFX9: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[LSHR3]], [[C5]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT4]](s64), [[SELECT5]](s64)
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ %0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ %1:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ %2:_(<2 x s128>) = G_LSHR %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+...
Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-merge-values.mir Thu Feb 7 11:37:44 2019
@@ -9,13 +9,13 @@ body: |
; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+ ; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C3]]
- ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
+ ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C4]]
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[AND]](s32)
; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C5]]
@@ -39,26 +39,26 @@ body: |
; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
; CHECK: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
- ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
+ ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[C4]]
- ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
- ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
+ ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY [[C1]](s32)
; CHECK: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY1]], [[C5]]
- ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND]], [[AND1]](s32)
+ ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[AND]](s32)
; CHECK: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; CHECK: [[COPY2:%[0-9]+]]:_(s32) = COPY [[C]](s32)
; CHECK: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C6]]
; CHECK: [[COPY3:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[COPY3]]
; CHECK: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
+ ; CHECK: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
+ ; CHECK: [[COPY4:%[0-9]+]]:_(s32) = COPY [[C7]](s32)
; CHECK: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY4]], [[C8]]
- ; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 16777215
- ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C7]](s32)
+ ; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[C2]](s32)
; CHECK: [[AND4:%[0-9]+]]:_(s32) = G_AND [[COPY5]], [[C9]]
- ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[AND4]](s32)
+ ; CHECK: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[AND4]], [[AND3]](s32)
; CHECK: [[COPY6:%[0-9]+]]:_(s32) = COPY [[OR]](s32)
; CHECK: [[COPY7:%[0-9]+]]:_(s32) = COPY [[SHL1]](s32)
; CHECK: [[OR1:%[0-9]+]]:_(s32) = G_OR [[COPY6]], [[COPY7]]
Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir Thu Feb 7 11:37:44 2019
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=SI %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=VI %s
-# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer %s -o - | FileCheck -check-prefix=GFX9 %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=tahiti -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck -check-prefix=SI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=fiji -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck -check-prefix=VI %s
+# RUN: llc -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 -O0 -run-pass=legalizer -global-isel-abort=0 %s -o - | FileCheck -check-prefix=GFX9 %s
---
name: test_shl_s32_s32
@@ -101,18 +101,14 @@ body: |
; VI-LABEL: name: test_shl_s64_s16
; VI: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s16)
; VI: $vgpr0_vgpr1 = COPY [[SHL]](s64)
; GFX9-LABEL: name: test_shl_s64_s16
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr2
- ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[AND]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[COPY]], [[TRUNC]](s16)
; GFX9: $vgpr0_vgpr1 = COPY [[SHL]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s32) = COPY $vgpr2
@@ -165,11 +161,11 @@ body: |
; SI-LABEL: name: test_shl_s16_s16
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[AND]](s32)
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
; VI-LABEL: name: test_shl_s16_s16
@@ -206,11 +202,11 @@ body: |
; SI-LABEL: name: test_shl_s16_i8
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[AND]](s32)
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
; VI-LABEL: name: test_shl_s16_i8
@@ -219,8 +215,10 @@ body: |
; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[AND]](s32)
+ ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SHL]](s16)
; VI: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: test_shl_s16_i8
@@ -229,8 +227,10 @@ body: |
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[AND]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[TRUNC1]](s16)
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SHL]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -251,31 +251,35 @@ body: |
; SI-LABEL: name: test_shl_i8_i8
; SI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
- ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY3]], [[C]]
- ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[AND]](s32)
+ ; SI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; SI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
+ ; SI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[AND]](s32)
; SI: [[COPY4:%[0-9]+]]:_(s32) = COPY [[SHL]](s32)
; SI: $vgpr0 = COPY [[COPY4]](s32)
; VI-LABEL: name: test_shl_i8_i8
; VI: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; VI: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[AND]](s32)
+ ; VI: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; VI: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; VI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; VI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; VI: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC]](s16)
; VI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SHL]](s16)
; VI: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: test_shl_i8_i8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
- ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[C]]
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[AND]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX9: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY2]], [[COPY3]]
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[AND]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[TRUNC]](s16)
; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SHL]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
@@ -472,12 +476,12 @@ body: |
; SI: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
; SI: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV2]](s16)
+ ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
- ; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
+ ; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT1]], [[ZEXT1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16)
@@ -558,16 +562,16 @@ body: |
; SI: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[COPY1]](<4 x s16>), 0
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[EXTRACT]](<3 x s16>)
; SI: [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[EXTRACT1]](<3 x s16>)
- ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV3]](s16)
+ ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
- ; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
+ ; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT1]], [[ZEXT1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
- ; SI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
+ ; SI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT2]], [[ZEXT2]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL2]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16)
@@ -626,20 +630,20 @@ body: |
; SI: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
; SI: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
; SI: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; SI: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[UV4]](s16)
+ ; SI: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UV]](s16)
; SI: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT]], [[ZEXT]](s32)
; SI: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[SHL]](s32)
- ; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; SI: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UV5]](s16)
+ ; SI: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV1]](s16)
; SI: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT1]], [[ZEXT1]](s32)
; SI: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL1]](s32)
- ; SI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; SI: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[UV6]](s16)
+ ; SI: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV2]](s16)
; SI: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT2]], [[ZEXT2]](s32)
; SI: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[SHL2]](s32)
- ; SI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
; SI: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[UV7]](s16)
+ ; SI: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV3]](s16)
; SI: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[ANYEXT3]], [[ZEXT3]](s32)
; SI: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[SHL3]](s32)
; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[TRUNC]](s16), [[TRUNC1]](s16), [[TRUNC2]](s16), [[TRUNC3]](s16)
@@ -669,3 +673,837 @@ body: |
%2:_(<4 x s16>) = G_SHL %0, %1
$vgpr0_vgpr1 = COPY %2
...
+
+---
+name: test_shl_s128_s128
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s128
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; SI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s128
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; VI: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s128
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; GFX9: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[COPY1]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = COPY $vgpr4
+ %2:_(s128) = G_ZEXT %1
+ %3:_(s128) = G_SHL %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_shl_s128_s32_var
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s32_var
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[COPY1]](s32)
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[COPY1]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s32_var
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[COPY1]](s32)
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[COPY1]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s32_var
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr4
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY1]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[COPY1]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY1]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[COPY1]](s32), [[C1]]
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[COPY1]](s32)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[COPY1]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV1]], [[SELECT1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = COPY $vgpr4
+ %2:_(s128) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %2
+...
+
+---
+name: test_shl_s128_s32_0
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s32_0
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s32_0
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s32_0
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[UV]](s64), [[UV1]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 0
+ %3:_(s128) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+
+---
+name: test_shl_s128_s32_23
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s32_23
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s32_23
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s32_23
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 23
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 41
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 23
+ %3:_(s128) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_shl_s128_s32_31
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s32_31
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s32_31
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s32_31
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 31
+ %3:_(s128) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_shl_s128_s32_32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s32_32
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s32_32
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s32_32
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 32
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 32
+ %3:_(s128) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_shl_s128_s32_33
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s32_33
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s32_33
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s32_33
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 33
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV1]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 31
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV]], [[C2]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SHL]](s64), [[OR]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 33
+ %3:_(s128) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_shl_s128_s32_127
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3, $vgpr4
+
+ ; SI-LABEL: name: test_shl_s128_s32_127
+ ; SI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; SI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; SI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C1]](s32)
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s64), [[SHL]](s64)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; VI-LABEL: name: test_shl_s128_s32_127
+ ; VI: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; VI: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; VI: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C1]](s32)
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s64), [[SHL]](s64)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ ; GFX9-LABEL: name: test_shl_s128_s32_127
+ ; GFX9: [[COPY:%[0-9]+]]:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ ; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](s128)
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV]], [[C1]](s32)
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C]](s64), [[SHL]](s64)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[MV]](s128)
+ %0:_(s128) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
+ %1:_(s32) = G_CONSTANT i32 127
+ %3:_(s128) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3 = COPY %3
+...
+
+---
+name: test_shl_s256_s256
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, $vgpr8
+
+ ; SI-LABEL: name: test_shl_s256_s256
+ ; SI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; SI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; SI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; SI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB3]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB2]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL]], [[C4]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[SHL2]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV3]], [[SELECT1]]
+ ; SI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; SI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; SI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; SI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; SI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC]](s32)
+ ; SI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB5]](s32)
+ ; SI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
+ ; SI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB4]](s32)
+ ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[SHL3]], [[C7]]
+ ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[SHL5]]
+ ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV5]], [[SELECT4]]
+ ; SI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; SI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; SI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; SI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; SI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB1]](s32)
+ ; SI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB1]](s32)
+ ; SI: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB7]](s32)
+ ; SI: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL6]]
+ ; SI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB6]](s32)
+ ; SI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[LSHR4]]
+ ; SI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV6]], [[SELECT6]]
+ ; SI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[LSHR2]], [[C10]]
+ ; SI: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT3]], [[SELECT7]]
+ ; SI: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; SI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; SI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; SI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; SI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; SI: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
+ ; SI: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
+ ; SI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB9]](s32)
+ ; SI: [[OR5:%[0-9]+]]:_(s64) = G_OR [[SHL8]], [[LSHR5]]
+ ; SI: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL9:%[0-9]+]]:_(s64) = G_SHL [[UV8]], [[SUB8]](s32)
+ ; SI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[SHL7]], [[C15]]
+ ; SI: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[SHL9]]
+ ; SI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV9]], [[SELECT10]]
+ ; SI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT]], [[C11]]
+ ; SI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C12]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
+ ; SI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT9]]
+ ; SI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; SI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT14]]
+ ; SI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT15]]
+ ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; SI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ ; VI-LABEL: name: test_shl_s256_s256
+ ; VI: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; VI: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; VI: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; VI: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB3]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB2]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL]], [[C4]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[SHL2]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV3]], [[SELECT1]]
+ ; VI: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; VI: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; VI: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; VI: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; VI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC]](s32)
+ ; VI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB5]](s32)
+ ; VI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
+ ; VI: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB4]](s32)
+ ; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[SHL3]], [[C7]]
+ ; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[SHL5]]
+ ; VI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV5]], [[SELECT4]]
+ ; VI: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; VI: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; VI: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; VI: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; VI: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB1]](s32)
+ ; VI: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB1]](s32)
+ ; VI: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB7]](s32)
+ ; VI: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL6]]
+ ; VI: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB6]](s32)
+ ; VI: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[LSHR4]]
+ ; VI: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV6]], [[SELECT6]]
+ ; VI: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[LSHR2]], [[C10]]
+ ; VI: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT3]], [[SELECT7]]
+ ; VI: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; VI: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; VI: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; VI: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; VI: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; VI: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
+ ; VI: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
+ ; VI: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB9]](s32)
+ ; VI: [[OR5:%[0-9]+]]:_(s64) = G_OR [[SHL8]], [[LSHR5]]
+ ; VI: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL9:%[0-9]+]]:_(s64) = G_SHL [[UV8]], [[SUB8]](s32)
+ ; VI: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[SHL7]], [[C15]]
+ ; VI: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[SHL9]]
+ ; VI: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV9]], [[SELECT10]]
+ ; VI: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT]], [[C11]]
+ ; VI: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C12]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
+ ; VI: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT9]]
+ ; VI: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; VI: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT14]]
+ ; VI: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT15]]
+ ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; VI: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ ; GFX9-LABEL: name: test_shl_s256_s256
+ ; GFX9: [[COPY:%[0-9]+]]:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr8
+ ; GFX9: [[ZEXT:%[0-9]+]]:_(s256) = G_ZEXT [[COPY1]](s32)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[ZEXT]](s256)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 128
+ ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](s256)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[TRUNC]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C1]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C2]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[TRUNC]]
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C2]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C3]]
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB3]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[C4:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB2]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL]], [[C4]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR]], [[SHL2]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV3]], [[SELECT1]]
+ ; GFX9: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C5]]
+ ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C5]], [[TRUNC]]
+ ; GFX9: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C5]]
+ ; GFX9: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C6]]
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC]](s32)
+ ; GFX9: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[TRUNC]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB5]](s32)
+ ; GFX9: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
+ ; GFX9: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB4]](s32)
+ ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[SHL3]], [[C7]]
+ ; GFX9: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR1]], [[SHL5]]
+ ; GFX9: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[UV5]], [[SELECT4]]
+ ; GFX9: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SUB1]], [[C8]]
+ ; GFX9: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C8]], [[SUB1]]
+ ; GFX9: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB1]](s32), [[C8]]
+ ; GFX9: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB1]](s32), [[C9]]
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB1]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB1]](s32)
+ ; GFX9: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[SUB7]](s32)
+ ; GFX9: [[OR2:%[0-9]+]]:_(s64) = G_OR [[LSHR3]], [[SHL6]]
+ ; GFX9: [[C10:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[UV7]], [[SUB6]](s32)
+ ; GFX9: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR2]], [[LSHR4]]
+ ; GFX9: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[UV6]], [[SELECT6]]
+ ; GFX9: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[LSHR2]], [[C10]]
+ ; GFX9: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SELECT3]], [[SELECT7]]
+ ; GFX9: [[OR4:%[0-9]+]]:_(s64) = G_OR [[SELECT5]], [[SELECT8]]
+ ; GFX9: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV8:%[0-9]+]]:_(s64), [[UV9:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SUB]], [[C13]]
+ ; GFX9: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C13]], [[SUB]]
+ ; GFX9: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[SUB]](s32), [[C13]]
+ ; GFX9: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[SUB]](s32), [[C14]]
+ ; GFX9: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
+ ; GFX9: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[UV9]], [[SUB]](s32)
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[UV8]], [[SUB9]](s32)
+ ; GFX9: [[OR5:%[0-9]+]]:_(s64) = G_OR [[SHL8]], [[LSHR5]]
+ ; GFX9: [[C15:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL9:%[0-9]+]]:_(s64) = G_SHL [[UV8]], [[SUB8]](s32)
+ ; GFX9: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[SHL7]], [[C15]]
+ ; GFX9: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR5]], [[SHL9]]
+ ; GFX9: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[UV9]], [[SELECT10]]
+ ; GFX9: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT]], [[C11]]
+ ; GFX9: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SELECT2]], [[C12]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT12]](s64), [[SELECT13]](s64)
+ ; GFX9: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR3]], [[SELECT9]]
+ ; GFX9: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR4]], [[SELECT11]]
+ ; GFX9: [[UV10:%[0-9]+]]:_(s64), [[UV11:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV10]], [[SELECT14]]
+ ; GFX9: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV11]], [[SELECT15]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT16]](s64), [[SELECT17]](s64)
+ ; GFX9: [[MV2:%[0-9]+]]:_(s256) = G_MERGE_VALUES [[MV]](s128), [[MV1]](s128)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[MV2]](s256)
+ %0:_(s256) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ %1:_(s32) = COPY $vgpr8
+ %2:_(s256) = G_ZEXT %1
+ %3:_(s256) = G_SHL %0, %2
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %3
+...
+
+---
+name: test_shl_v2s128_v2s32
+body: |
+ bb.0:
+ liveins: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7, $vgpr4_vgpr5
+
+ ; SI-LABEL: name: test_shl_v2s128_v2s32
+ ; SI: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; SI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; SI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; SI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; SI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; SI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; SI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; SI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; SI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; SI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[UV2]](s32)
+ ; SI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[UV2]](s32)
+ ; SI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB1]](s32)
+ ; SI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; SI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB]](s32)
+ ; SI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; SI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; SI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV5]], [[SELECT1]]
+ ; SI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; SI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; SI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; SI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; SI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; SI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; SI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; SI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; SI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[UV3]](s32)
+ ; SI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[UV3]](s32)
+ ; SI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB3]](s32)
+ ; SI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
+ ; SI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; SI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB2]](s32)
+ ; SI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C5]]
+ ; SI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[SHL5]]
+ ; SI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV7]], [[SELECT4]]
+ ; SI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT3]](s64), [[SELECT5]](s64)
+ ; SI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; SI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ ; VI-LABEL: name: test_shl_v2s128_v2s32
+ ; VI: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; VI: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; VI: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; VI: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; VI: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; VI: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; VI: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; VI: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; VI: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; VI: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[UV2]](s32)
+ ; VI: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[UV2]](s32)
+ ; VI: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB1]](s32)
+ ; VI: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; VI: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB]](s32)
+ ; VI: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; VI: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; VI: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV5]], [[SELECT1]]
+ ; VI: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; VI: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; VI: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; VI: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; VI: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; VI: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; VI: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; VI: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; VI: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[UV3]](s32)
+ ; VI: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[UV3]](s32)
+ ; VI: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB3]](s32)
+ ; VI: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
+ ; VI: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; VI: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB2]](s32)
+ ; VI: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C5]]
+ ; VI: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[SHL5]]
+ ; VI: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV7]], [[SELECT4]]
+ ; VI: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT3]](s64), [[SELECT5]](s64)
+ ; VI: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; VI: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ ; GFX9-LABEL: name: test_shl_v2s128_v2s32
+ ; GFX9: [[COPY:%[0-9]+]]:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ ; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ ; GFX9: [[UV:%[0-9]+]]:_(s128), [[UV1:%[0-9]+]]:_(s128) = G_UNMERGE_VALUES [[COPY]](<2 x s128>)
+ ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV]](s128)
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV2]], [[C]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C]], [[UV2]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV2]](s32), [[C]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV2]](s32), [[C1]]
+ ; GFX9: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[UV2]](s32)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV5]], [[UV2]](s32)
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV4]], [[SUB1]](s32)
+ ; GFX9: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV4]], [[SUB]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C2]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; GFX9: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV5]], [[SELECT1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT]](s64), [[SELECT2]](s64)
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; GFX9: [[UV6:%[0-9]+]]:_(s64), [[UV7:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[UV1]](s128)
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV3]], [[C3]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[UV3]]
+ ; GFX9: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV3]](s32), [[C3]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[UV3]](s32), [[C4]]
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[UV3]](s32)
+ ; GFX9: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[UV7]], [[UV3]](s32)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[UV6]], [[SUB3]](s32)
+ ; GFX9: [[OR1:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
+ ; GFX9: [[C5:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[UV6]], [[SUB2]](s32)
+ ; GFX9: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C5]]
+ ; GFX9: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR1]], [[SHL5]]
+ ; GFX9: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[UV7]], [[SELECT4]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[SELECT3]](s64), [[SELECT5]](s64)
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s128>) = G_BUILD_VECTOR [[MV]](s128), [[MV1]](s128)
+ ; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY [[BUILD_VECTOR]](<2 x s128>)
+ %0:_(<2 x s128>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
+ %1:_(<2 x s32>) = COPY $vgpr4_vgpr5
+ %2:_(<2 x s128>) = G_SHL %0, %1
+ $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7 = COPY %2
+...
Modified: llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir?rev=353455&r1=353454&r2=353455&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/GlobalISel/legalize-unmerge-values.mir Thu Feb 7 11:37:44 2019
@@ -234,58 +234,180 @@ body: |
liveins: $vgpr0
; CHECK-LABEL: name: test_unmerge_s1_s8
; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
- ; CHECK: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; CHECK: [[ZEXT:%[0-9]+]]:_(s128) = G_ZEXT [[TRUNC]](s8)
- ; CHECK: [[C:%[0-9]+]]:_(s128) = G_CONSTANT i128 15
- ; CHECK: [[SHL:%[0-9]+]]:_(s128) = G_SHL [[ZEXT]], [[C]](s128)
- ; CHECK: [[OR:%[0-9]+]]:_(s128) = G_OR [[ZEXT]], [[SHL]]
- ; CHECK: [[C1:%[0-9]+]]:_(s128) = G_CONSTANT i128 30
- ; CHECK: [[SHL1:%[0-9]+]]:_(s128) = G_SHL [[OR]], [[C1]](s128)
- ; CHECK: [[OR1:%[0-9]+]]:_(s128) = G_OR [[OR]], [[SHL1]]
- ; CHECK: [[C2:%[0-9]+]]:_(s128) = G_CONSTANT i128 45
- ; CHECK: [[SHL2:%[0-9]+]]:_(s128) = G_SHL [[OR1]], [[C2]](s128)
- ; CHECK: [[OR2:%[0-9]+]]:_(s128) = G_OR [[OR1]], [[SHL2]]
- ; CHECK: [[C3:%[0-9]+]]:_(s128) = G_CONSTANT i128 60
- ; CHECK: [[SHL3:%[0-9]+]]:_(s128) = G_SHL [[OR2]], [[C3]](s128)
- ; CHECK: [[OR3:%[0-9]+]]:_(s128) = G_OR [[OR2]], [[SHL3]]
- ; CHECK: [[C4:%[0-9]+]]:_(s128) = G_CONSTANT i128 75
- ; CHECK: [[SHL4:%[0-9]+]]:_(s128) = G_SHL [[OR3]], [[C4]](s128)
- ; CHECK: [[OR4:%[0-9]+]]:_(s128) = G_OR [[OR3]], [[SHL4]]
- ; CHECK: [[C5:%[0-9]+]]:_(s128) = G_CONSTANT i128 90
- ; CHECK: [[SHL5:%[0-9]+]]:_(s128) = G_SHL [[OR4]], [[C5]](s128)
- ; CHECK: [[OR5:%[0-9]+]]:_(s128) = G_OR [[OR4]], [[SHL5]]
- ; CHECK: [[C6:%[0-9]+]]:_(s128) = G_CONSTANT i128 105
- ; CHECK: [[SHL6:%[0-9]+]]:_(s128) = G_SHL [[OR5]], [[C6]](s128)
- ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[OR5]](s128)
- ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[SHL6]](s128)
- ; CHECK: [[OR6:%[0-9]+]]:_(s64) = G_OR [[UV]], [[UV2]]
- ; CHECK: [[OR7:%[0-9]+]]:_(s64) = G_OR [[UV1]], [[UV3]]
- ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[OR6]](s64), [[OR7]](s64)
- ; CHECK: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16), [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[MV]](s128)
- ; CHECK: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[UV4]](s16)
- ; CHECK: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC1]](s1)
- ; CHECK: [[TRUNC2:%[0-9]+]]:_(s1) = G_TRUNC [[UV5]](s16)
- ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s1)
- ; CHECK: [[TRUNC3:%[0-9]+]]:_(s1) = G_TRUNC [[UV6]](s16)
- ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s1)
- ; CHECK: [[TRUNC4:%[0-9]+]]:_(s1) = G_TRUNC [[UV7]](s16)
- ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC4]](s1)
- ; CHECK: [[TRUNC5:%[0-9]+]]:_(s1) = G_TRUNC [[UV8]](s16)
- ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC5]](s1)
- ; CHECK: [[TRUNC6:%[0-9]+]]:_(s1) = G_TRUNC [[UV9]](s16)
- ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC6]](s1)
- ; CHECK: [[TRUNC7:%[0-9]+]]:_(s1) = G_TRUNC [[UV10]](s16)
- ; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC7]](s1)
- ; CHECK: [[TRUNC8:%[0-9]+]]:_(s1) = G_TRUNC [[UV11]](s16)
- ; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC8]](s1)
- ; CHECK: $vgpr0 = COPY [[ANYEXT]](s32)
- ; CHECK: $vgpr1 = COPY [[ANYEXT1]](s32)
- ; CHECK: $vgpr2 = COPY [[ANYEXT2]](s32)
- ; CHECK: $vgpr3 = COPY [[ANYEXT3]](s32)
- ; CHECK: $vgpr4 = COPY [[ANYEXT4]](s32)
- ; CHECK: $vgpr5 = COPY [[ANYEXT5]](s32)
- ; CHECK: $vgpr6 = COPY [[ANYEXT6]](s32)
- ; CHECK: $vgpr7 = COPY [[ANYEXT7]](s32)
+ ; CHECK: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 255
+ ; CHECK: [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[ANYEXT:%[0-9]+]]:_(s128) = G_ANYEXT [[COPY]](s32)
+ ; CHECK: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[ANYEXT]](s128)
+ ; CHECK: [[AND:%[0-9]+]]:_(s64) = G_AND [[UV]], [[C]]
+ ; CHECK: [[AND1:%[0-9]+]]:_(s64) = G_AND [[UV1]], [[C1]]
+ ; CHECK: [[MV:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[AND]](s64), [[AND1]](s64)
+ ; CHECK: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 15
+ ; CHECK: [[C3:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[MV1:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C2]](s64), [[C3]](s64)
+ ; CHECK: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[MV1]](s128)
+ ; CHECK: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
+ ; CHECK: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[TRUNC]], [[C4]]
+ ; CHECK: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C4]], [[TRUNC]]
+ ; CHECK: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC]](s32), [[C4]]
+ ; CHECK: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC]](s32), [[C5]]
+ ; CHECK: [[SHL:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; CHECK: [[SHL1:%[0-9]+]]:_(s64) = G_SHL [[UV3]], [[TRUNC]](s32)
+ ; CHECK: [[LSHR:%[0-9]+]]:_(s64) = G_LSHR [[UV2]], [[SUB1]](s32)
+ ; CHECK: [[OR:%[0-9]+]]:_(s64) = G_OR [[SHL1]], [[LSHR]]
+ ; CHECK: [[C6:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL2:%[0-9]+]]:_(s64) = G_SHL [[UV2]], [[SUB]](s32)
+ ; CHECK: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[SHL]], [[C6]]
+ ; CHECK: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[OR]], [[SHL2]]
+ ; CHECK: [[SELECT2:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[UV3]], [[SELECT1]]
+ ; CHECK: [[UV4:%[0-9]+]]:_(s64), [[UV5:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[MV]](s128)
+ ; CHECK: [[OR1:%[0-9]+]]:_(s64) = G_OR [[UV4]], [[SELECT]]
+ ; CHECK: [[OR2:%[0-9]+]]:_(s64) = G_OR [[UV5]], [[SELECT2]]
+ ; CHECK: [[C7:%[0-9]+]]:_(s64) = G_CONSTANT i64 30
+ ; CHECK: [[C8:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[MV2:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C7]](s64), [[C8]](s64)
+ ; CHECK: [[TRUNC1:%[0-9]+]]:_(s32) = G_TRUNC [[MV2]](s128)
+ ; CHECK: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[TRUNC1]], [[C9]]
+ ; CHECK: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C9]], [[TRUNC1]]
+ ; CHECK: [[C10:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC1]](s32), [[C9]]
+ ; CHECK: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC1]](s32), [[C10]]
+ ; CHECK: [[SHL3:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[TRUNC1]](s32)
+ ; CHECK: [[SHL4:%[0-9]+]]:_(s64) = G_SHL [[OR2]], [[TRUNC1]](s32)
+ ; CHECK: [[LSHR1:%[0-9]+]]:_(s64) = G_LSHR [[OR1]], [[SUB3]](s32)
+ ; CHECK: [[OR3:%[0-9]+]]:_(s64) = G_OR [[SHL4]], [[LSHR1]]
+ ; CHECK: [[C11:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL5:%[0-9]+]]:_(s64) = G_SHL [[OR1]], [[SUB2]](s32)
+ ; CHECK: [[SELECT3:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[SHL3]], [[C11]]
+ ; CHECK: [[SELECT4:%[0-9]+]]:_(s64) = G_SELECT [[ICMP2]](s1), [[OR3]], [[SHL5]]
+ ; CHECK: [[SELECT5:%[0-9]+]]:_(s64) = G_SELECT [[ICMP3]](s1), [[OR2]], [[SELECT4]]
+ ; CHECK: [[OR4:%[0-9]+]]:_(s64) = G_OR [[OR1]], [[SELECT3]]
+ ; CHECK: [[OR5:%[0-9]+]]:_(s64) = G_OR [[OR2]], [[SELECT5]]
+ ; CHECK: [[C12:%[0-9]+]]:_(s64) = G_CONSTANT i64 45
+ ; CHECK: [[C13:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[MV3:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C12]](s64), [[C13]](s64)
+ ; CHECK: [[TRUNC2:%[0-9]+]]:_(s32) = G_TRUNC [[MV3]](s128)
+ ; CHECK: [[C14:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[TRUNC2]], [[C14]]
+ ; CHECK: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C14]], [[TRUNC2]]
+ ; CHECK: [[C15:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[ICMP4:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC2]](s32), [[C14]]
+ ; CHECK: [[ICMP5:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC2]](s32), [[C15]]
+ ; CHECK: [[SHL6:%[0-9]+]]:_(s64) = G_SHL [[OR5]], [[TRUNC2]](s32)
+ ; CHECK: [[SHL7:%[0-9]+]]:_(s64) = G_SHL [[OR5]], [[TRUNC2]](s32)
+ ; CHECK: [[LSHR2:%[0-9]+]]:_(s64) = G_LSHR [[OR4]], [[SUB5]](s32)
+ ; CHECK: [[OR6:%[0-9]+]]:_(s64) = G_OR [[SHL7]], [[LSHR2]]
+ ; CHECK: [[C16:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL8:%[0-9]+]]:_(s64) = G_SHL [[OR4]], [[SUB4]](s32)
+ ; CHECK: [[SELECT6:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[SHL6]], [[C16]]
+ ; CHECK: [[SELECT7:%[0-9]+]]:_(s64) = G_SELECT [[ICMP4]](s1), [[OR6]], [[SHL8]]
+ ; CHECK: [[SELECT8:%[0-9]+]]:_(s64) = G_SELECT [[ICMP5]](s1), [[OR5]], [[SELECT7]]
+ ; CHECK: [[OR7:%[0-9]+]]:_(s64) = G_OR [[OR4]], [[SELECT6]]
+ ; CHECK: [[OR8:%[0-9]+]]:_(s64) = G_OR [[OR5]], [[SELECT8]]
+ ; CHECK: [[C17:%[0-9]+]]:_(s64) = G_CONSTANT i64 60
+ ; CHECK: [[C18:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[MV4:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C17]](s64), [[C18]](s64)
+ ; CHECK: [[TRUNC3:%[0-9]+]]:_(s32) = G_TRUNC [[MV4]](s128)
+ ; CHECK: [[C19:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[TRUNC3]], [[C19]]
+ ; CHECK: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C19]], [[TRUNC3]]
+ ; CHECK: [[C20:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[ICMP6:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC3]](s32), [[C19]]
+ ; CHECK: [[ICMP7:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC3]](s32), [[C20]]
+ ; CHECK: [[SHL9:%[0-9]+]]:_(s64) = G_SHL [[OR8]], [[TRUNC3]](s32)
+ ; CHECK: [[SHL10:%[0-9]+]]:_(s64) = G_SHL [[OR8]], [[TRUNC3]](s32)
+ ; CHECK: [[LSHR3:%[0-9]+]]:_(s64) = G_LSHR [[OR7]], [[SUB7]](s32)
+ ; CHECK: [[OR9:%[0-9]+]]:_(s64) = G_OR [[SHL10]], [[LSHR3]]
+ ; CHECK: [[C21:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL11:%[0-9]+]]:_(s64) = G_SHL [[OR7]], [[SUB6]](s32)
+ ; CHECK: [[SELECT9:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[SHL9]], [[C21]]
+ ; CHECK: [[SELECT10:%[0-9]+]]:_(s64) = G_SELECT [[ICMP6]](s1), [[OR9]], [[SHL11]]
+ ; CHECK: [[SELECT11:%[0-9]+]]:_(s64) = G_SELECT [[ICMP7]](s1), [[OR8]], [[SELECT10]]
+ ; CHECK: [[OR10:%[0-9]+]]:_(s64) = G_OR [[OR7]], [[SELECT9]]
+ ; CHECK: [[OR11:%[0-9]+]]:_(s64) = G_OR [[OR8]], [[SELECT11]]
+ ; CHECK: [[C22:%[0-9]+]]:_(s64) = G_CONSTANT i64 75
+ ; CHECK: [[C23:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[MV5:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C22]](s64), [[C23]](s64)
+ ; CHECK: [[TRUNC4:%[0-9]+]]:_(s32) = G_TRUNC [[MV5]](s128)
+ ; CHECK: [[C24:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[TRUNC4]], [[C24]]
+ ; CHECK: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[C24]], [[TRUNC4]]
+ ; CHECK: [[C25:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[ICMP8:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC4]](s32), [[C24]]
+ ; CHECK: [[ICMP9:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC4]](s32), [[C25]]
+ ; CHECK: [[SHL12:%[0-9]+]]:_(s64) = G_SHL [[OR11]], [[TRUNC4]](s32)
+ ; CHECK: [[SHL13:%[0-9]+]]:_(s64) = G_SHL [[OR11]], [[TRUNC4]](s32)
+ ; CHECK: [[LSHR4:%[0-9]+]]:_(s64) = G_LSHR [[OR10]], [[SUB9]](s32)
+ ; CHECK: [[OR12:%[0-9]+]]:_(s64) = G_OR [[SHL13]], [[LSHR4]]
+ ; CHECK: [[C26:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL14:%[0-9]+]]:_(s64) = G_SHL [[OR10]], [[SUB8]](s32)
+ ; CHECK: [[SELECT12:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[SHL12]], [[C26]]
+ ; CHECK: [[SELECT13:%[0-9]+]]:_(s64) = G_SELECT [[ICMP8]](s1), [[OR12]], [[SHL14]]
+ ; CHECK: [[SELECT14:%[0-9]+]]:_(s64) = G_SELECT [[ICMP9]](s1), [[OR11]], [[SELECT13]]
+ ; CHECK: [[OR13:%[0-9]+]]:_(s64) = G_OR [[OR10]], [[SELECT12]]
+ ; CHECK: [[OR14:%[0-9]+]]:_(s64) = G_OR [[OR11]], [[SELECT14]]
+ ; CHECK: [[C27:%[0-9]+]]:_(s64) = G_CONSTANT i64 90
+ ; CHECK: [[C28:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[MV6:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C27]](s64), [[C28]](s64)
+ ; CHECK: [[TRUNC5:%[0-9]+]]:_(s32) = G_TRUNC [[MV6]](s128)
+ ; CHECK: [[C29:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[SUB10:%[0-9]+]]:_(s32) = G_SUB [[TRUNC5]], [[C29]]
+ ; CHECK: [[SUB11:%[0-9]+]]:_(s32) = G_SUB [[C29]], [[TRUNC5]]
+ ; CHECK: [[C30:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[ICMP10:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC5]](s32), [[C29]]
+ ; CHECK: [[ICMP11:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC5]](s32), [[C30]]
+ ; CHECK: [[SHL15:%[0-9]+]]:_(s64) = G_SHL [[OR14]], [[TRUNC5]](s32)
+ ; CHECK: [[SHL16:%[0-9]+]]:_(s64) = G_SHL [[OR14]], [[TRUNC5]](s32)
+ ; CHECK: [[LSHR5:%[0-9]+]]:_(s64) = G_LSHR [[OR13]], [[SUB11]](s32)
+ ; CHECK: [[OR15:%[0-9]+]]:_(s64) = G_OR [[SHL16]], [[LSHR5]]
+ ; CHECK: [[C31:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL17:%[0-9]+]]:_(s64) = G_SHL [[OR13]], [[SUB10]](s32)
+ ; CHECK: [[SELECT15:%[0-9]+]]:_(s64) = G_SELECT [[ICMP10]](s1), [[SHL15]], [[C31]]
+ ; CHECK: [[SELECT16:%[0-9]+]]:_(s64) = G_SELECT [[ICMP10]](s1), [[OR15]], [[SHL17]]
+ ; CHECK: [[SELECT17:%[0-9]+]]:_(s64) = G_SELECT [[ICMP11]](s1), [[OR14]], [[SELECT16]]
+ ; CHECK: [[OR16:%[0-9]+]]:_(s64) = G_OR [[OR13]], [[SELECT15]]
+ ; CHECK: [[OR17:%[0-9]+]]:_(s64) = G_OR [[OR14]], [[SELECT17]]
+ ; CHECK: [[C32:%[0-9]+]]:_(s64) = G_CONSTANT i64 105
+ ; CHECK: [[C33:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[MV7:%[0-9]+]]:_(s128) = G_MERGE_VALUES [[C32]](s64), [[C33]](s64)
+ ; CHECK: [[TRUNC6:%[0-9]+]]:_(s32) = G_TRUNC [[MV7]](s128)
+ ; CHECK: [[C34:%[0-9]+]]:_(s32) = G_CONSTANT i32 64
+ ; CHECK: [[SUB12:%[0-9]+]]:_(s32) = G_SUB [[TRUNC6]], [[C34]]
+ ; CHECK: [[SUB13:%[0-9]+]]:_(s32) = G_SUB [[C34]], [[TRUNC6]]
+ ; CHECK: [[C35:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; CHECK: [[ICMP12:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[TRUNC6]](s32), [[C34]]
+ ; CHECK: [[ICMP13:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[TRUNC6]](s32), [[C35]]
+ ; CHECK: [[SHL18:%[0-9]+]]:_(s64) = G_SHL [[OR17]], [[TRUNC6]](s32)
+ ; CHECK: [[SHL19:%[0-9]+]]:_(s64) = G_SHL [[OR17]], [[TRUNC6]](s32)
+ ; CHECK: [[LSHR6:%[0-9]+]]:_(s64) = G_LSHR [[OR16]], [[SUB13]](s32)
+ ; CHECK: [[OR18:%[0-9]+]]:_(s64) = G_OR [[SHL19]], [[LSHR6]]
+ ; CHECK: [[C36:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; CHECK: [[SHL20:%[0-9]+]]:_(s64) = G_SHL [[OR16]], [[SUB12]](s32)
+ ; CHECK: [[SELECT18:%[0-9]+]]:_(s64) = G_SELECT [[ICMP12]](s1), [[SHL18]], [[C36]]
+ ; CHECK: [[SELECT19:%[0-9]+]]:_(s64) = G_SELECT [[ICMP12]](s1), [[OR18]], [[SHL20]]
+ ; CHECK: [[SELECT20:%[0-9]+]]:_(s64) = G_SELECT [[ICMP13]](s1), [[OR17]], [[SELECT19]]
+ ; CHECK: [[OR19:%[0-9]+]]:_(s64) = G_OR [[OR16]], [[SELECT18]]
+ ; CHECK: [[OR20:%[0-9]+]]:_(s64) = G_OR [[OR17]], [[SELECT20]]
+ ; CHECK: [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16), [[UV8:%[0-9]+]]:_(s16), [[UV9:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR19]](s64)
+ ; CHECK: [[UV10:%[0-9]+]]:_(s16), [[UV11:%[0-9]+]]:_(s16), [[UV12:%[0-9]+]]:_(s16), [[UV13:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[OR20]](s64)
+ ; CHECK: [[ANYEXT1:%[0-9]+]]:_(s32) = G_ANYEXT [[UV6]](s16)
+ ; CHECK: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[UV7]](s16)
+ ; CHECK: [[ANYEXT3:%[0-9]+]]:_(s32) = G_ANYEXT [[UV8]](s16)
+ ; CHECK: [[ANYEXT4:%[0-9]+]]:_(s32) = G_ANYEXT [[UV9]](s16)
+ ; CHECK: [[ANYEXT5:%[0-9]+]]:_(s32) = G_ANYEXT [[UV10]](s16)
+ ; CHECK: [[ANYEXT6:%[0-9]+]]:_(s32) = G_ANYEXT [[UV11]](s16)
+ ; CHECK: [[ANYEXT7:%[0-9]+]]:_(s32) = G_ANYEXT [[UV12]](s16)
+ ; CHECK: [[ANYEXT8:%[0-9]+]]:_(s32) = G_ANYEXT [[UV13]](s16)
+ ; CHECK: $vgpr0 = COPY [[ANYEXT1]](s32)
+ ; CHECK: $vgpr1 = COPY [[ANYEXT2]](s32)
+ ; CHECK: $vgpr2 = COPY [[ANYEXT3]](s32)
+ ; CHECK: $vgpr3 = COPY [[ANYEXT4]](s32)
+ ; CHECK: $vgpr4 = COPY [[ANYEXT5]](s32)
+ ; CHECK: $vgpr5 = COPY [[ANYEXT6]](s32)
+ ; CHECK: $vgpr6 = COPY [[ANYEXT7]](s32)
+ ; CHECK: $vgpr7 = COPY [[ANYEXT8]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s8) = G_TRUNC %0
%2:_(s1), %3:_(s1), %4:_(s1), %5:_(s1), %6:_(s1), %7:_(s1), %8:_(s1), %9:_(s1) = G_UNMERGE_VALUES %1
More information about the llvm-commits
mailing list