[llvm] b35833b - [GlobalISel][AMDGPU] Legalize saturating add/subtract
Matt Arsenault via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 23 06:06:51 PDT 2020
Author: Jay Foad
Date: 2020-07-23T09:06:42-04:00
New Revision: b35833b84e95c42ca7166232e193b2b8e221b56a
URL: https://github.com/llvm/llvm-project/commit/b35833b84e95c42ca7166232e193b2b8e221b56a
DIFF: https://github.com/llvm/llvm-project/commit/b35833b84e95c42ca7166232e193b2b8e221b56a.diff
LOG: [GlobalISel][AMDGPU] Legalize saturating add/subtract
Add support in LegalizerHelper for lowering G_SADDSAT etc. either
using add/subtract-with-overflow or using max/min instructions.
Enable this lowering for AMDGPU so it can be tested. The legalization
rules are still approximate and skip using the clamp bit (which has
never been used before) to treat these operations as legal. This also
doesn't yet try to handle expanding the SALU cases.
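For reference, the two lowering strategies correspond to the following
standalone C++ sketch for 32-bit operands. This is illustrative only,
not part of the patch: the helper names are invented, and the final
int32_t cast assumes two's complement. The first pair uses the min/max
identities from lowerAddSubSatToMinMax, the second pair the
overflow-flag expansion from lowerAddSubSatToAddoSubo:

#include <algorithm>
#include <cstdint>

// min/max strategy, unsigned: uadd.sat(a, b) -> a + umin(~a, b).
// ~a is the headroom UINT32_MAX - a, so the add can never wrap.
uint32_t uaddsat_minmax(uint32_t a, uint32_t b) {
  return a + std::min(~a, b);
}

// min/max strategy, signed, following the formulas in the patch:
//   hi = 0x7fffffff - smax(a, 0), lo = 0x80000000 - smin(a, 0)
//   a + smin(smax(lo, b), hi)
int32_t saddsat_minmax(int32_t a, int32_t b) {
  int32_t hi = INT32_MAX - std::max(a, 0); // largest addend that can't overflow
  int32_t lo = INT32_MIN - std::min(a, 0); // smallest addend that can't overflow
  return a + std::min(std::max(lo, b), hi);
}

// overflow strategy, unsigned: {tmp, ov} = uaddo(a, b); ov ? 0xffffffff : tmp.
uint32_t uaddsat_overflow(uint32_t a, uint32_t b) {
  uint32_t tmp = a + b; // wraps on overflow
  bool ov = tmp < a;    // carry out of the unsigned add
  return ov ? UINT32_MAX : tmp;
}

// overflow strategy, signed: {tmp, ov} = saddo(a, b);
//   ov ? (tmp >>s 31) + 0x80000000 : tmp
// On overflow the wrapped sum has the opposite sign of the true result,
// so its sign bit selects INT32_MAX (wrapped negative) or INT32_MIN.
int32_t saddsat_overflow(int32_t a, int32_t b) {
  int32_t tmp;
  bool ov = __builtin_add_overflow(a, b, &tmp); // GCC/Clang builtin
  uint32_t sign = (uint32_t)tmp >> 31;          // 1 iff wrapped sum is negative
  uint32_t clamp = (0u - sign) + 0x80000000u;   // == (tmp >>s 31) + 0x80000000
  return ov ? (int32_t)clamp : tmp;             // two's complement assumed
}

As a sanity check, saddsat_minmax(0x7fffffff, 1) and
saddsat_overflow(0x7fffffff, 1) both return 0x7fffffff. The min/max
form needs no carry-out type, which is why lower() prefers it whenever
the needed G_SMIN/G_SMAX (or G_UMIN) is legal or custom; the overflow
form is shorter but requires choosing a boolean carry type, as the
FIXME in the patch notes.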
Added:
llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
Modified:
llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uaddsat.mir
llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-usubsat.mir
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index c8bf212da55c..0a490249f156 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -317,6 +317,8 @@ class LegalizerHelper {
LegalizeResult lowerExtract(MachineInstr &MI);
LegalizeResult lowerInsert(MachineInstr &MI);
LegalizeResult lowerSADDO_SSUBO(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToMinMax(MachineInstr &MI);
+ LegalizeResult lowerAddSubSatToAddoSubo(MachineInstr &MI);
LegalizeResult lowerBswap(MachineInstr &MI);
LegalizeResult lowerBitreverse(MachineInstr &MI);
LegalizeResult lowerReadWriteRegister(MachineInstr &MI);
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
index 99c08dfec768..7cf1fb03d6e1 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/LegalizerInfo.h
@@ -1214,6 +1214,12 @@ class LegalizerInfo {
bool isLegal(const LegalityQuery &Query) const {
return getAction(Query).Action == LegalizeAction::Legal;
}
+
+ bool isLegalOrCustom(const LegalityQuery &Query) const {
+ auto Action = getAction(Query).Action;
+ return Action == LegalizeAction::Legal || Action == LegalizeAction::Custom;
+ }
+
bool isLegal(const MachineInstr &MI, const MachineRegisterInfo &MRI) const;
bool isLegalOrCustom(const MachineInstr &MI,
const MachineRegisterInfo &MRI) const;
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index e420e7512ad6..5dcb5b3271d8 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -2728,6 +2728,27 @@ LegalizerHelper::lower(MachineInstr &MI, unsigned TypeIdx, LLT Ty) {
case G_READ_REGISTER:
case G_WRITE_REGISTER:
return lowerReadWriteRegister(MI);
+ case G_UADDSAT:
+ case G_USUBSAT: {
+ // Try to make a reasonable guess about which lowering strategy to use. The
+ // target can override this with custom lowering and calling the
+ // implementation functions.
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+ if (LI.isLegalOrCustom({G_UMIN, Ty}))
+ return lowerAddSubSatToMinMax(MI);
+ return lowerAddSubSatToAddoSubo(MI);
+ }
+ case G_SADDSAT:
+ case G_SSUBSAT: {
+ LLT Ty = MRI.getType(MI.getOperand(0).getReg());
+
+ // FIXME: It would probably make more sense to see if G_SADDO is preferred,
+ // since it's a shorter expansion. However, we would need to figure out the
+ // preferred boolean type for the carry out for the query.
+ if (LI.isLegalOrCustom({G_SMIN, Ty}) && LI.isLegalOrCustom({G_SMAX, Ty}))
+ return lowerAddSubSatToMinMax(MI);
+ return lowerAddSubSatToAddoSubo(MI);
+ }
}
}
@@ -5315,6 +5336,151 @@ LegalizerHelper::lowerSADDO_SSUBO(MachineInstr &MI) {
return Legalized;
}
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerAddSubSatToMinMax(MachineInstr &MI) {
+ Register Res = MI.getOperand(0).getReg();
+ Register LHS = MI.getOperand(1).getReg();
+ Register RHS = MI.getOperand(2).getReg();
+ LLT Ty = MRI.getType(Res);
+ bool IsSigned;
+ bool IsAdd;
+ unsigned BaseOp;
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("unexpected addsat/subsat opcode");
+ case TargetOpcode::G_UADDSAT:
+ IsSigned = false;
+ IsAdd = true;
+ BaseOp = TargetOpcode::G_ADD;
+ break;
+ case TargetOpcode::G_SADDSAT:
+ IsSigned = true;
+ IsAdd = true;
+ BaseOp = TargetOpcode::G_ADD;
+ break;
+ case TargetOpcode::G_USUBSAT:
+ IsSigned = false;
+ IsAdd = false;
+ BaseOp = TargetOpcode::G_SUB;
+ break;
+ case TargetOpcode::G_SSUBSAT:
+ IsSigned = true;
+ IsAdd = false;
+ BaseOp = TargetOpcode::G_SUB;
+ break;
+ }
+
+ if (IsSigned) {
+ // sadd.sat(a, b) ->
+ // hi = 0x7fffffff - smax(a, 0)
+ // lo = 0x80000000 - smin(a, 0)
+ // a + smin(smax(lo, b), hi)
+ // ssub.sat(a, b) ->
+ // lo = smax(a, -1) - 0x7fffffff
+ // hi = smin(a, -1) - 0x80000000
+ // a - smin(smax(lo, b), hi)
+ // TODO: AMDGPU can use a "median of 3" instruction here:
+ // a +/- med3(lo, b, hi)
+ uint64_t NumBits = Ty.getScalarSizeInBits();
+ auto MaxVal =
+ MIRBuilder.buildConstant(Ty, APInt::getSignedMaxValue(NumBits));
+ auto MinVal =
+ MIRBuilder.buildConstant(Ty, APInt::getSignedMinValue(NumBits));
+ MachineInstrBuilder Hi, Lo;
+ if (IsAdd) {
+ auto Zero = MIRBuilder.buildConstant(Ty, 0);
+ Hi = MIRBuilder.buildSub(Ty, MaxVal, MIRBuilder.buildSMax(Ty, LHS, Zero));
+ Lo = MIRBuilder.buildSub(Ty, MinVal, MIRBuilder.buildSMin(Ty, LHS, Zero));
+ } else {
+ auto NegOne = MIRBuilder.buildConstant(Ty, -1);
+ Lo = MIRBuilder.buildSub(Ty, MIRBuilder.buildSMax(Ty, LHS, NegOne),
+ MaxVal);
+ Hi = MIRBuilder.buildSub(Ty, MIRBuilder.buildSMin(Ty, LHS, NegOne),
+ MinVal);
+ }
+ auto RHSClamped =
+ MIRBuilder.buildSMin(Ty, MIRBuilder.buildSMax(Ty, Lo, RHS), Hi);
+ MIRBuilder.buildInstr(BaseOp, {Res}, {LHS, RHSClamped});
+ } else {
+ // uadd.sat(a, b) -> a + umin(~a, b)
+ // usub.sat(a, b) -> a - umin(a, b)
+ Register Not = IsAdd ? MIRBuilder.buildNot(Ty, LHS).getReg(0) : LHS;
+ auto Min = MIRBuilder.buildUMin(Ty, Not, RHS);
+ MIRBuilder.buildInstr(BaseOp, {Res}, {LHS, Min});
+ }
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
+LegalizerHelper::LegalizeResult
+LegalizerHelper::lowerAddSubSatToAddoSubo(MachineInstr &MI) {
+ Register Res = MI.getOperand(0).getReg();
+ Register LHS = MI.getOperand(1).getReg();
+ Register RHS = MI.getOperand(2).getReg();
+ LLT Ty = MRI.getType(Res);
+ LLT BoolTy = Ty.changeElementSize(1);
+ bool IsSigned;
+ bool IsAdd;
+ unsigned OverflowOp;
+ switch (MI.getOpcode()) {
+ default:
+ llvm_unreachable("unexpected addsat/subsat opcode");
+ case TargetOpcode::G_UADDSAT:
+ IsSigned = false;
+ IsAdd = true;
+ OverflowOp = TargetOpcode::G_UADDO;
+ break;
+ case TargetOpcode::G_SADDSAT:
+ IsSigned = true;
+ IsAdd = true;
+ OverflowOp = TargetOpcode::G_SADDO;
+ break;
+ case TargetOpcode::G_USUBSAT:
+ IsSigned = false;
+ IsAdd = false;
+ OverflowOp = TargetOpcode::G_USUBO;
+ break;
+ case TargetOpcode::G_SSUBSAT:
+ IsSigned = true;
+ IsAdd = false;
+ OverflowOp = TargetOpcode::G_SSUBO;
+ break;
+ }
+
+ auto OverflowRes =
+ MIRBuilder.buildInstr(OverflowOp, {Ty, BoolTy}, {LHS, RHS});
+ Register Tmp = OverflowRes.getReg(0);
+ Register Ov = OverflowRes.getReg(1);
+ MachineInstrBuilder Clamp;
+ if (IsSigned) {
+ // sadd.sat(a, b) ->
+ // {tmp, ov} = saddo(a, b)
+ // ov ? (tmp >>s 31) + 0x80000000 : tmp
+ // ssub.sat(a, b) ->
+ // {tmp, ov} = ssubo(a, b)
+ // ov ? (tmp >>s 31) + 0x80000000 : tmp
+ uint64_t NumBits = Ty.getScalarSizeInBits();
+ auto ShiftAmount = MIRBuilder.buildConstant(Ty, NumBits - 1);
+ auto Sign = MIRBuilder.buildAShr(Ty, Tmp, ShiftAmount);
+ auto MinVal =
+ MIRBuilder.buildConstant(Ty, APInt::getSignedMinValue(NumBits));
+ Clamp = MIRBuilder.buildAdd(Ty, Sign, MinVal);
+ } else {
+ // uadd.sat(a, b) ->
+ // {tmp, ov} = uaddo(a, b)
+ // ov ? 0xffffffff : tmp
+ // usub.sat(a, b) ->
+ // {tmp, ov} = usubo(a, b)
+ // ov ? 0 : tmp
+ Clamp = MIRBuilder.buildConstant(Ty, IsAdd ? -1 : 0);
+ }
+ MIRBuilder.buildSelect(Res, Ov, Clamp, Tmp);
+
+ MI.eraseFromParent();
+ return Legalized;
+}
+
LegalizerHelper::LegalizeResult
LegalizerHelper::lowerBswap(MachineInstr &MI) {
Register Dst = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index b7b6b113fac1..3417906d0884 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -422,23 +422,69 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.legalIf(isPointer(0));
if (ST.hasVOP3PInsts()) {
+ assert(ST.hasIntClamp() && "all targets with VOP3P should support clamp");
getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
.legalFor({S32, S16, V2S16})
.clampScalar(0, S16, S32)
.clampMaxNumElements(0, S16, 2)
.scalarize(0)
.widenScalarToNextPow2(0, 32);
+
+ getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT, G_SADDSAT, G_SSUBSAT})
+ .lowerFor({S32, S16, V2S16}) // FIXME: make legal and merge with add/sub/mul
+ .minScalar(0, S16)
+ .clampMaxNumElements(0, S16, 2)
+ .scalarize(0)
+ .widenScalarToNextPow2(0, 32)
+ .lower();
} else if (ST.has16BitInsts()) {
getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
.legalFor({S32, S16})
.clampScalar(0, S16, S32)
.scalarize(0)
- .widenScalarToNextPow2(0, 32);
+ .widenScalarToNextPow2(0, 32); // FIXME: min should be 16
+
+ assert(ST.hasIntClamp() && "all targets with 16-bit should support clamp");
+
+ // Technically the saturating operations require clamp bit support, but this
+ // was introduced at the same time as 16-bit operations.
+ getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
+ .lowerFor({S32, S16}) // FIXME: legal with clamp modifier
+ .minScalar(0, S16)
+ .scalarize(0)
+ .widenScalarToNextPow2(0, 16)
+ .lower();
+
+ // We're just lowering this, but it helps get a better result to try to
+ // coerce to the desired type first.
+ getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT})
+ .minScalar(0, S16)
+ .scalarize(0)
+ .lower();
} else {
getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
.legalFor({S32})
.clampScalar(0, S32, S32)
.scalarize(0);
+
+ if (ST.hasIntClamp()) {
+ getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
+ .lowerFor({S32}) // FIXME: legal with clamp modifier.
+ .scalarize(0)
+ .minScalarOrElt(0, S32)
+ .lower();
+ } else {
+ // Clamp bit support was added in VI, along with 16-bit operations.
+ getActionDefinitionsBuilder({G_UADDSAT, G_USUBSAT})
+ .minScalar(0, S32)
+ .scalarize(0)
+ .lower();
+ }
+
+ getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT})
+ .minScalar(0, S32)
+ .scalarize(0)
+ .lower();
}
getActionDefinitionsBuilder({G_SDIV, G_UDIV, G_SREM, G_UREM})
@@ -1432,12 +1478,6 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
SextInReg.lowerFor({{S32}, {S64}});
}
- // FIXME: Placeholder rule. Really depends on whether the clamp modifier is
- // available, and is selectively legal for s16, s32, v2s16.
- getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT, G_UADDSAT, G_USUBSAT})
- .scalarize(0)
- .clampScalar(0, S16, S32);
-
SextInReg
.scalarize(0)
.clampScalar(0, S32, S64)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index eff858ea9b89..ce6dd41bc941 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -3547,6 +3547,10 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
return getDefaultMappingSOP(MI);
LLVM_FALLTHROUGH;
+ case AMDGPU::G_SADDSAT: // FIXME: Could lower sat ops for SALU
+ case AMDGPU::G_SSUBSAT:
+ case AMDGPU::G_UADDSAT:
+ case AMDGPU::G_USUBSAT:
case AMDGPU::G_FADD:
case AMDGPU::G_FSUB:
case AMDGPU::G_FPTOSI:
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
index 8b3fbdaa73eb..28a8efad1d10 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-saddsat.mir
@@ -12,51 +12,66 @@ body: |
; GFX6-LABEL: name: saddsat_s7
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[SADDSAT]](s16)
- ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s7) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s7)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: saddsat_s7
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SADDSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[ASHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[ADD]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: saddsat_s7
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SADDSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[ASHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[ADD]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s7) = G_TRUNC %0
@@ -75,51 +90,66 @@ body: |
; GFX6-LABEL: name: saddsat_s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[SADDSAT]](s16)
- ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s8)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: saddsat_s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SADDSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[ADD]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: saddsat_s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SADDSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[ADD]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s8) = G_TRUNC %0
@@ -138,75 +168,152 @@ body: |
; GFX6-LABEL: name: saddsat_v2s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX6: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX6: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s8) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[SADDSAT1]](s16)
- ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SADDSAT]](s8), [[TRUNC3]](s8)
- ; GFX6: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX6: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C2]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C2]](s32)
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C5]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C5]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C4]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ADD]], [[C2]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C2]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C5]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C3]], [[SMAX2]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C5]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C4]], [[SMIN2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ADD1]], [[C2]](s32)
+ ; GFX6: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C6]]
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C7]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY6]](s32)
+ ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL4]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
+ ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX8-LABEL: name: saddsat_v2s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX8: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX8: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s8) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SADDSAT1]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SADDSAT]](s8), [[TRUNC2]](s8)
- ; GFX8: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX8: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C6]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C4]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C6]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C5]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[ADD]], [[C3]](s16)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[SHL2]], [[C6]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[C4]], [[SMAX2]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[SHL2]], [[C6]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[C5]], [[SMIN2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[SHL2]], [[SMIN3]]
+ ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ADD1]], [[C3]](s16)
+ ; GFX8: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX8: [[COPY2:%[0-9]+]]:_(s16) = COPY [[ASHR]](s16)
+ ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C7]]
+ ; GFX8: [[COPY3:%[0-9]+]]:_(s16) = COPY [[ASHR1]](s16)
+ ; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C7]]
+ ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: saddsat_v2s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX9: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX9: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s8) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[SHL]], [[SHL1]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SADDSAT1]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SADDSAT]](s8), [[TRUNC2]](s8)
- ; GFX9: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX9: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C6]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C4]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C6]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C5]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[ADD]], [[C3]](s16)
+ ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[SHL2]], [[C6]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[C4]], [[SMAX2]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[SHL2]], [[C6]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[C5]], [[SMIN2]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[SHL2]], [[SMIN3]]
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[ADD1]], [[C3]](s16)
+ ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s16) = COPY [[ASHR]](s16)
+ ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C7]]
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s16) = COPY [[ASHR1]](s16)
+ ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C7]]
+ ; GFX9: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -228,26 +335,57 @@ body: |
; GFX6-LABEL: name: saddsat_s16
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[TRUNC]], [[TRUNC1]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SADDSAT]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: saddsat_s16
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[TRUNC]], [[TRUNC1]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SADDSAT]](s16)
+ ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C2]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[TRUNC1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s16)
; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: saddsat_s16
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[TRUNC]], [[TRUNC1]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SADDSAT]](s16)
+ ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[TRUNC1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[SMIN1]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -267,30 +405,99 @@ body: |
; GFX6-LABEL: name: saddsat_v2s16
; GFX6: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX6: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16)
- ; GFX6: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C3]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX2]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C3]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ADD1]], [[C]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C4]]
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C4]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL4]]
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX8-LABEL: name: saddsat_v2s16
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX8: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16)
- ; GFX8: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[TRUNC2]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX2]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[TRUNC3]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[TRUNC1]], [[SMIN3]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ADD]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ADD1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX9-LABEL: name: saddsat_v2s16
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX9: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16)
- ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32767
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[SMAX:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[COPY]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[COPY]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC1]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB1]], [[COPY1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(<2 x s16>) = G_ADD [[COPY]], [[SMIN1]]
+ ; GFX9: $vgpr0 = COPY [[ADD]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_SADDSAT %0, %1
@@ -306,42 +513,197 @@ body: |
; GFX6-LABEL: name: saddsat_v3s16
; GFX6: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX6: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX6: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV2]], [[UV5]]
- ; GFX6: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV3]], [[UV6]]
- ; GFX6: [[SADDSAT2:%[0-9]+]]:_(s16) = G_SADDSAT [[UV4]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16), [[SADDSAT2]](s16)
; GFX6: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX6: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX6: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C3]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX2]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C3]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ADD1]], [[C]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[SMAX4:%[0-9]+]]:_(s32) = G_SMAX [[SHL4]], [[C3]]
+ ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX4]]
+ ; GFX6: [[SMIN4:%[0-9]+]]:_(s32) = G_SMIN [[SHL4]], [[C3]]
+ ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN4]]
+ ; GFX6: [[SMAX5:%[0-9]+]]:_(s32) = G_SMAX [[SUB5]], [[SHL5]]
+ ; GFX6: [[SMIN5:%[0-9]+]]:_(s32) = G_SMIN [[SMAX5]], [[SUB4]]
+ ; GFX6: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[SHL4]], [[SMIN5]]
+ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[ADD2]], [[C]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C4]]
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C4]]
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL6]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[ASHR2]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C4]]
+ ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[C3]](s32)
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[COPY10]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL7]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX6: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX6: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX8-LABEL: name: saddsat_v3s16
; GFX8: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX8: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX8: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV2]], [[UV5]]
- ; GFX8: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV3]], [[UV6]]
- ; GFX8: [[SADDSAT2:%[0-9]+]]:_(s16) = G_SADDSAT [[UV4]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16), [[SADDSAT2]](s16)
; GFX8: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX8: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX8: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[TRUNC3]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX2]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[TRUNC4]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[TRUNC1]], [[SMIN3]]
+ ; GFX8: [[SMAX4:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB4:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX4]]
+ ; GFX8: [[SMIN4:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB5:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN4]]
+ ; GFX8: [[SMAX5:%[0-9]+]]:_(s16) = G_SMAX [[SUB5]], [[TRUNC5]]
+ ; GFX8: [[SMIN5:%[0-9]+]]:_(s16) = G_SMIN [[SMAX5]], [[SUB4]]
+ ; GFX8: [[ADD2:%[0-9]+]]:_(s16) = G_ADD [[TRUNC2]], [[SMIN5]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ADD]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ADD1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ADD2]](s16)
+ ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[C4]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX8: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX8: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX9-LABEL: name: saddsat_v3s16
; GFX9: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX9: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX9: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV2]], [[UV5]]
- ; GFX9: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV3]], [[UV6]]
- ; GFX9: [[SADDSAT2:%[0-9]+]]:_(s16) = G_SADDSAT [[UV4]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16), [[SADDSAT2]](s16)
; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX9: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY3]](s32), [[DEF1]](s32)
+ ; GFX9: [[DEF2:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
+ ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX9: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
+ ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[DEF1]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32767
+ ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
+ ; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[BUILD_VECTOR_TRUNC6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C3]](s32), [[C3]](s32)
+ ; GFX9: [[SMAX:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC6]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC4]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC6]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC5]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB1]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(<2 x s16>) = G_ADD [[BUILD_VECTOR_TRUNC]], [[SMIN1]]
+ ; GFX9: [[BUILD_VECTOR_TRUNC7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC8:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC9:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C3]](s32), [[C3]](s32)
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[BUILD_VECTOR_TRUNC1]], [[BUILD_VECTOR_TRUNC9]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC7]], [[SMAX2]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[BUILD_VECTOR_TRUNC1]], [[BUILD_VECTOR_TRUNC9]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC8]], [[SMIN2]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB3]], [[BUILD_VECTOR_TRUNC3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(<2 x s16>) = G_ADD [[BUILD_VECTOR_TRUNC1]], [[SMIN3]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[ADD]](<2 x s16>), [[ADD1]](<2 x s16>), [[DEF2]](<2 x s16>)
+ ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<6 x s16>), 0
+ ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX9: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
%0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s16>), %2:_(<3 x s16>) = G_UNMERGE_VALUES %0
%3:_(<3 x s16>) = G_SADDSAT %1, %2
@@ -359,36 +721,180 @@ body: |
; GFX6-LABEL: name: saddsat_v4s16
; GFX6: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX6: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV]], [[UV4]]
- ; GFX6: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV1]], [[UV5]]
- ; GFX6: [[SADDSAT2:%[0-9]+]]:_(s16) = G_SADDSAT [[UV2]], [[UV6]]
- ; GFX6: [[SADDSAT3:%[0-9]+]]:_(s16) = G_SADDSAT [[UV3]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16), [[SADDSAT2]](s16), [[SADDSAT3]](s16)
- ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX6: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C3]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX2]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C3]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[ADD1]], [[C]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C]](s32)
+ ; GFX6: [[SMAX4:%[0-9]+]]:_(s32) = G_SMAX [[SHL4]], [[C3]]
+ ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX4]]
+ ; GFX6: [[SMIN4:%[0-9]+]]:_(s32) = G_SMIN [[SHL4]], [[C3]]
+ ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN4]]
+ ; GFX6: [[SMAX5:%[0-9]+]]:_(s32) = G_SMAX [[SUB5]], [[SHL5]]
+ ; GFX6: [[SMIN5:%[0-9]+]]:_(s32) = G_SMIN [[SMAX5]], [[SUB4]]
+ ; GFX6: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[SHL4]], [[SMIN5]]
+ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[ADD2]], [[C]](s32)
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[COPY8]], [[C]](s32)
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[C]](s32)
+ ; GFX6: [[SMAX6:%[0-9]+]]:_(s32) = G_SMAX [[SHL6]], [[C3]]
+ ; GFX6: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMAX6]]
+ ; GFX6: [[SMIN6:%[0-9]+]]:_(s32) = G_SMIN [[SHL6]], [[C3]]
+ ; GFX6: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[C2]], [[SMIN6]]
+ ; GFX6: [[SMAX7:%[0-9]+]]:_(s32) = G_SMAX [[SUB7]], [[SHL7]]
+ ; GFX6: [[SMIN7:%[0-9]+]]:_(s32) = G_SMIN [[SMAX7]], [[SUB6]]
+ ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[SHL6]], [[SMIN7]]
+ ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[ADD3]], [[C]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C4]]
+ ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C4]]
+ ; GFX6: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL8]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY [[ASHR2]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C4]]
+ ; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY [[ASHR3]](s32)
+ ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C4]]
+ ; GFX6: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL9]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX8-LABEL: name: saddsat_v4s16
; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX8: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV]], [[UV4]]
- ; GFX8: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV1]], [[UV5]]
- ; GFX8: [[SADDSAT2:%[0-9]+]]:_(s16) = G_SADDSAT [[UV2]], [[UV6]]
- ; GFX8: [[SADDSAT3:%[0-9]+]]:_(s16) = G_SADDSAT [[UV3]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16), [[SADDSAT2]](s16), [[SADDSAT3]](s16)
- ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB1]], [[TRUNC4]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX2]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[TRUNC5]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[TRUNC1]], [[SMIN3]]
+ ; GFX8: [[SMAX4:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB4:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX4]]
+ ; GFX8: [[SMIN4:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB5:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN4]]
+ ; GFX8: [[SMAX5:%[0-9]+]]:_(s16) = G_SMAX [[SUB5]], [[TRUNC6]]
+ ; GFX8: [[SMIN5:%[0-9]+]]:_(s16) = G_SMIN [[SMAX5]], [[SUB4]]
+ ; GFX8: [[ADD2:%[0-9]+]]:_(s16) = G_ADD [[TRUNC2]], [[SMIN5]]
+ ; GFX8: [[SMAX6:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC3]], [[C3]]
+ ; GFX8: [[SUB6:%[0-9]+]]:_(s16) = G_SUB [[C1]], [[SMAX6]]
+ ; GFX8: [[SMIN6:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC3]], [[C3]]
+ ; GFX8: [[SUB7:%[0-9]+]]:_(s16) = G_SUB [[C2]], [[SMIN6]]
+ ; GFX8: [[SMAX7:%[0-9]+]]:_(s16) = G_SMAX [[SUB7]], [[TRUNC7]]
+ ; GFX8: [[SMIN7:%[0-9]+]]:_(s16) = G_SMIN [[SMAX7]], [[SUB6]]
+ ; GFX8: [[ADD3:%[0-9]+]]:_(s16) = G_ADD [[TRUNC3]], [[SMIN7]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ADD]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ADD1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ADD2]](s16)
+ ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[ADD3]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX9-LABEL: name: saddsat_v4s16
; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s16) = G_SADDSAT [[UV]], [[UV4]]
- ; GFX9: [[SADDSAT1:%[0-9]+]]:_(s16) = G_SADDSAT [[UV1]], [[UV5]]
- ; GFX9: [[SADDSAT2:%[0-9]+]]:_(s16) = G_SADDSAT [[UV2]], [[UV6]]
- ; GFX9: [[SADDSAT3:%[0-9]+]]:_(s16) = G_SADDSAT [[UV3]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SADDSAT]](s16), [[SADDSAT1]](s16), [[SADDSAT2]](s16), [[SADDSAT3]](s16)
- ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32767
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[SMAX:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[UV]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[UV]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC1]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB1]], [[UV2]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(<2 x s16>) = G_ADD [[UV]], [[SMIN1]]
+ ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[UV1]], [[BUILD_VECTOR_TRUNC5]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC3]], [[SMAX2]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[UV1]], [[BUILD_VECTOR_TRUNC5]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC4]], [[SMIN2]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(<2 x s16>) = G_ADD [[UV1]], [[SMIN3]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[ADD]](<2 x s16>), [[ADD1]](<2 x s16>)
+ ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_SADDSAT %0, %1
@@ -404,18 +910,45 @@ body: |
; GFX6-LABEL: name: saddsat_s32
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s32) = G_SADDSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0 = COPY [[SADDSAT]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[COPY1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[SMIN1]]
+ ; GFX6: $vgpr0 = COPY [[ADD]](s32)
; GFX8-LABEL: name: saddsat_s32
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s32) = G_SADDSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0 = COPY [[SADDSAT]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[COPY1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[SMIN1]]
+ ; GFX8: $vgpr0 = COPY [[ADD]](s32)
; GFX9-LABEL: name: saddsat_s32
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s32) = G_SADDSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0 = COPY [[SADDSAT]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[COPY1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[SMIN1]]
+ ; GFX9: $vgpr0 = COPY [[ADD]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_SADDSAT %0, %1
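The s32 and <2 x s32> cases above all expand through the min/max path
(lowerAddSubSatToMinMax): clamp the addend to the range that cannot
overflow, then add normally. A minimal C++ sketch of the arithmetic the
CHECK lines encode; the function name and variable names are illustrative,
not part of the patch:

  #include <algorithm>
  #include <cstdint>

  // Signed saturating add, mirroring the G_SMAX/G_SMIN/G_SUB/G_ADD
  // sequence expected above: Y is clamped to the addend range that
  // cannot overflow, then added with a plain G_ADD.
  int32_t saddsat32(int32_t X, int32_t Y) {
    int32_t MaxAddend = INT32_MAX - std::max<int32_t>(X, 0); // [[SUB]]
    int32_t MinAddend = INT32_MIN - std::min<int32_t>(X, 0); // [[SUB1]]
    int32_t Clamped =
        std::min<int32_t>(std::max<int32_t>(Y, MinAddend), MaxAddend);
    return X + Clamped; // in range, so an ordinary add cannot overflow
  }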
@@ -433,27 +966,72 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s32) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX6: [[SADDSAT1:%[0-9]+]]:_(s32) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SADDSAT]](s32), [[SADDSAT1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[UV]], [[C2]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[UV]], [[C2]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[UV2]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV]], [[SMIN1]]
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[UV1]], [[C2]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX2]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[UV1]], [[C2]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UV1]], [[SMIN3]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ADD]](s32), [[ADD1]](s32)
; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX8-LABEL: name: saddsat_v2s32
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s32) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX8: [[SADDSAT1:%[0-9]+]]:_(s32) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SADDSAT]](s32), [[SADDSAT1]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[UV]], [[C2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[UV]], [[C2]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[UV2]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[UV1]], [[C2]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX2]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[UV1]], [[C2]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UV1]], [[SMIN3]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ADD]](s32), [[ADD1]](s32)
; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX9-LABEL: name: saddsat_v2s32
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s32) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX9: [[SADDSAT1:%[0-9]+]]:_(s32) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SADDSAT]](s32), [[SADDSAT1]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[UV]], [[C2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[UV]], [[C2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB1]], [[UV2]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV]], [[SMIN1]]
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[UV1]], [[C2]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[C]], [[SMAX2]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[UV1]], [[C2]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[C1]], [[SMIN2]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB2]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UV1]], [[SMIN3]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ADD]](s32), [[ADD1]](s32)
; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
@@ -470,18 +1048,69 @@ body: |
; GFX6-LABEL: name: saddsat_s64
; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s64) = G_SADDSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0_vgpr1 = COPY [[SADDSAT]](s64)
+ ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
+ ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
+ ; GFX6: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX6: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX6: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
+ ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX8-LABEL: name: saddsat_s64
; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s64) = G_SADDSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0_vgpr1 = COPY [[SADDSAT]](s64)
+ ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
+ ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
+ ; GFX8: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX8: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
+ ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX9-LABEL: name: saddsat_s64
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s64) = G_SADDSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0_vgpr1 = COPY [[SADDSAT]](s64)
+ ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
+ ; GFX9: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[COPY1]](s64), [[C]]
+ ; GFX9: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX9: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO3]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_SADDSAT %0, %1
@@ -499,27 +1128,120 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX6: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX6: [[SADDSAT:%[0-9]+]]:_(s64) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX6: [[SADDSAT1:%[0-9]+]]:_(s64) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SADDSAT]](s64), [[SADDSAT1]](s64)
+ ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
+ ; GFX6: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX6: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX6: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX6: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
+ ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX6: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX6: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
+ ; GFX6: [[UADDE4:%[0-9]+]]:_(s32), [[UADDE5:%[0-9]+]]:_(s1) = G_UADDE [[UV13]], [[UV15]], [[UADDO5]]
+ ; GFX6: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO4]](s32), [[UADDE4]](s32)
+ ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
+ ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
+ ; GFX6: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
+ ; GFX6: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX6: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
+ ; GFX6: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
+ ; GFX6: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
+ ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX8-LABEL: name: saddsat_v2s64
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX8: [[SADDSAT:%[0-9]+]]:_(s64) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX8: [[SADDSAT1:%[0-9]+]]:_(s64) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SADDSAT]](s64), [[SADDSAT1]](s64)
+ ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
+ ; GFX8: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX8: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX8: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
+ ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX8: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX8: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
+ ; GFX8: [[UADDE4:%[0-9]+]]:_(s32), [[UADDE5:%[0-9]+]]:_(s1) = G_UADDE [[UV13]], [[UV15]], [[UADDO5]]
+ ; GFX8: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO4]](s32), [[UADDE4]](s32)
+ ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
+ ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
+ ; GFX8: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
+ ; GFX8: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
+ ; GFX8: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX8: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
+ ; GFX8: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
+ ; GFX8: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
+ ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX9-LABEL: name: saddsat_v2s64
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9: [[SADDSAT:%[0-9]+]]:_(s64) = G_SADDSAT [[UV]], [[UV2]]
- ; GFX9: [[SADDSAT1:%[0-9]+]]:_(s64) = G_SADDSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SADDSAT]](s64), [[SADDSAT1]](s64)
+ ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX9: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV2]](s64), [[C]]
+ ; GFX9: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX9: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX9: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX9: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX9: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX9: [[UADDO4:%[0-9]+]]:_(s32), [[UADDO5:%[0-9]+]]:_(s1) = G_UADDO [[UV12]], [[UV14]]
+ ; GFX9: [[UADDE4:%[0-9]+]]:_(s32), [[UADDE5:%[0-9]+]]:_(s1) = G_UADDE [[UV13]], [[UV15]], [[UADDO5]]
+ ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO4]](s32), [[UADDE4]](s32)
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[UV3]](s64), [[C]]
+ ; GFX9: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
+ ; GFX9: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX9: [[UADDO6:%[0-9]+]]:_(s32), [[UADDO7:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
+ ; GFX9: [[UADDE6:%[0-9]+]]:_(s32), [[UADDE7:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO7]]
+ ; GFX9: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO6]](s32), [[UADDE6]](s32)
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
index 31f119c13e5e..40eb12034c97 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ssubsat.mir
@@ -12,51 +12,66 @@ body: |
; GFX6-LABEL: name: ssubsat_s7
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[SSUBSAT]](s16)
- ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s7) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s7)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C1]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C2]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: ssubsat_s7
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SSUBSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[ASHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C1]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C2]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SUB2]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: ssubsat_s7
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SSUBSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[ASHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C1]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C2]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SUB2]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s7) = G_TRUNC %0
@@ -75,51 +90,66 @@ body: |
; GFX6-LABEL: name: ssubsat_s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[SSUBSAT]](s16)
- ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s8)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C1]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C2]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: ssubsat_s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SSUBSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C1]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C2]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SUB2]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: ssubsat_s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SSUBSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C3]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C1]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C2]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SUB2]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ASHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s8) = G_TRUNC %0
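The checks above show how the sub-word s8 case is handled: both operands are shifted into the most significant byte of a wider legal type (s16 on GFX8/GFX9, s32 on GFX6), the saturating operation runs at that width, and an arithmetic shift right recovers the s8 result, so the signed saturation bounds of the wide type line up with the narrow type. A minimal C++ sketch of that widening trick — illustrative only, not the LegalizerHelper code, and using a straightforward reference ssubsat16 rather than the min/max expansion:

  #include <cstdint>

  // Reference 16-bit signed saturating subtract: compute at 32 bits, clamp.
  int16_t ssubsat16_ref(int16_t x, int16_t y) {
    int32_t r = int32_t(x) - int32_t(y);
    if (r > INT16_MAX) r = INT16_MAX;
    if (r < INT16_MIN) r = INT16_MIN;
    return int16_t(r);
  }

  // s8 via widening, as in the checks above: operate in the top byte.
  int8_t ssubsat8(int8_t a, int8_t b) {
    // The int16_t conversions wrap modulo 2^16 (guaranteed since C++20),
    // mirroring G_SHL by 8 on an anyext'd value.
    int16_t wa = int16_t(uint16_t(uint8_t(a)) << 8);
    int16_t wb = int16_t(uint16_t(uint8_t(b)) << 8);
    // >> on a negative value is an arithmetic shift (guaranteed since
    // C++20), mirroring the trailing G_ASHR by 8.
    return int8_t(ssubsat16_ref(wa, wb) >> 8);
  }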
@@ -138,75 +168,152 @@ body: |
; GFX6-LABEL: name: ssubsat_v2s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX6: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX6: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s8) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[SSUBSAT1]](s16)
- ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SEXT]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SSUBSAT]](s8), [[TRUNC3]](s8)
- ; GFX6: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX6: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C2]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C2]](s32)
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C5]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C3]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C5]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C4]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C2]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C2]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C5]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SMAX2]], [[C3]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C5]]
+ ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SMIN2]], [[C4]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SUB5]], [[C2]](s32)
+ ; GFX6: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C6]]
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX6: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C7]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY6]](s32)
+ ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL4]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
+ ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX8-LABEL: name: ssubsat_v2s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX8: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX8: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s8) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SSUBSAT1]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SSUBSAT]](s8), [[TRUNC2]](s8)
- ; GFX8: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX8: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C6]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C4]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C6]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C5]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SUB2]], [[C3]](s16)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[SHL2]], [[C6]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[SMAX2]], [[C4]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[SHL2]], [[C6]]
+ ; GFX8: [[SUB4:%[0-9]+]]:_(s16) = G_SUB [[SMIN2]], [[C5]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX8: [[SUB5:%[0-9]+]]:_(s16) = G_SUB [[SHL2]], [[SMIN3]]
+ ; GFX8: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SUB5]], [[C3]](s16)
+ ; GFX8: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX8: [[COPY2:%[0-9]+]]:_(s16) = COPY [[ASHR]](s16)
+ ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C7]]
+ ; GFX8: [[COPY3:%[0-9]+]]:_(s16) = COPY [[ASHR1]](s16)
+ ; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C7]]
+ ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: ssubsat_v2s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX9: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX9: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s8) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[SHL]], [[SHL1]]
- ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SSUBSAT1]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[ASHR]](s16)
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[SSUBSAT]](s8), [[TRUNC2]](s8)
- ; GFX9: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX9: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C6:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[SHL]], [[C6]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C4]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[SHL]], [[C6]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C5]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s16) = G_ASHR [[SUB2]], [[C3]](s16)
+ ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[SHL2]], [[C6]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[SMAX2]], [[C4]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[SHL2]], [[C6]]
+ ; GFX9: [[SUB4:%[0-9]+]]:_(s16) = G_SUB [[SMIN2]], [[C5]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX9: [[SUB5:%[0-9]+]]:_(s16) = G_SUB [[SHL2]], [[SMIN3]]
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s16) = G_ASHR [[SUB5]], [[C3]](s16)
+ ; GFX9: [[C7:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s16) = COPY [[ASHR]](s16)
+ ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C7]]
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s16) = COPY [[ASHR1]](s16)
+ ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C7]]
+ ; GFX9: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -228,26 +335,57 @@ body: |
; GFX6-LABEL: name: ssubsat_s16
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[TRUNC]], [[TRUNC1]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SSUBSAT]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C1]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C2]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: ssubsat_s16
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[TRUNC]], [[TRUNC1]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SSUBSAT]](s16)
+ ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C2]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C1]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[TRUNC1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUB2]](s16)
; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: ssubsat_s16
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[TRUNC]], [[TRUNC1]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SSUBSAT]](s16)
+ ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX9: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C1]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[TRUNC1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[SMIN1]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUB2]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
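The s16 checks show the core of the new min/max lowering: the right-hand operand is clamped to the range of values that can be subtracted from x without wrapping, and then an ordinary G_SUB produces the saturated result. The bounds smax(x, -1) - MAX and smin(x, -1) - MIN are themselves overflow-free precisely because of the smax/smin against -1. A minimal C++ sketch of the same dataflow (a sketch of the emitted sequence, not the in-tree implementation):

  #include <algorithm>
  #include <cstdint>

  int16_t ssubsat16(int16_t x, int16_t y) {
    // Lowest/highest y for which x - y stays in [INT16_MIN, INT16_MAX];
    // the max/min against -1 keep these two subtractions from wrapping.
    int16_t lo = std::max<int16_t>(x, -1) - INT16_MAX;  // G_SMAX, G_SUB
    int16_t hi = std::min<int16_t>(x, -1) - INT16_MIN;  // G_SMIN, G_SUB
    // clamp(y, lo, hi), matching the G_SMAX/G_SMIN pair in the checks.
    int16_t clamped = std::min<int16_t>(std::max<int16_t>(lo, y), hi);
    return int16_t(x - clamped);                        // final G_SUB
  }

For example, ssubsat16(INT16_MIN, 1) gives lo = -1 - INT16_MAX = INT16_MIN and hi = INT16_MIN - INT16_MIN = 0, so y is clamped to 0 and the result saturates at INT16_MIN, as required.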
@@ -267,30 +405,99 @@ body: |
; GFX6-LABEL: name: ssubsat_v2s16
; GFX6: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX6: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16)
- ; GFX6: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C1]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C2]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C3]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SMAX2]], [[C1]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C3]]
+ ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SMIN2]], [[C2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SUB5]], [[C]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C4]]
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C4]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL4]]
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX8-LABEL: name: ssubsat_v2s16
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX8: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16)
- ; GFX8: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C1]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C2]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[TRUNC2]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[SMAX2]], [[C1]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB4:%[0-9]+]]:_(s16) = G_SUB [[SMIN2]], [[C2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[TRUNC3]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX8: [[SUB5:%[0-9]+]]:_(s16) = G_SUB [[TRUNC1]], [[SMIN3]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SUB2]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SUB5]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX9-LABEL: name: ssubsat_v2s16
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX9: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16)
- ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32767
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[SMAX:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[COPY]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMAX]], [[BUILD_VECTOR_TRUNC]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[COPY]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMIN]], [[BUILD_VECTOR_TRUNC1]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB]], [[COPY1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(<2 x s16>) = G_SUB [[COPY]], [[SMIN1]]
+ ; GFX9: $vgpr0 = COPY [[SUB2]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_SSUBSAT %0, %1
@@ -306,42 +513,197 @@ body: |
; GFX6-LABEL: name: ssubsat_v3s16
; GFX6: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX6: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX6: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV2]], [[UV5]]
- ; GFX6: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV3]], [[UV6]]
- ; GFX6: [[SSUBSAT2:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV4]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16), [[SSUBSAT2]](s16)
; GFX6: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX6: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX6: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C1]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C2]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C3]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SMAX2]], [[C1]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C3]]
+ ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SMIN2]], [[C2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SUB5]], [[C]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[SMAX4:%[0-9]+]]:_(s32) = G_SMAX [[SHL4]], [[C3]]
+ ; GFX6: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SMAX4]], [[C1]]
+ ; GFX6: [[SMIN4:%[0-9]+]]:_(s32) = G_SMIN [[SHL4]], [[C3]]
+ ; GFX6: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[SMIN4]], [[C2]]
+ ; GFX6: [[SMAX5:%[0-9]+]]:_(s32) = G_SMAX [[SUB6]], [[SHL5]]
+ ; GFX6: [[SMIN5:%[0-9]+]]:_(s32) = G_SMIN [[SMAX5]], [[SUB7]]
+ ; GFX6: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SHL4]], [[SMIN5]]
+ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SUB8]], [[C]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C4]]
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C4]]
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL6]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[ASHR2]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C4]]
+ ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[C5]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL7]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX6: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX6: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX8-LABEL: name: ssubsat_v3s16
; GFX8: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX8: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX8: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV2]], [[UV5]]
- ; GFX8: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV3]], [[UV6]]
- ; GFX8: [[SSUBSAT2:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV4]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16), [[SSUBSAT2]](s16)
; GFX8: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX8: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX8: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C1]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C2]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[TRUNC3]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[SMAX2]], [[C1]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB4:%[0-9]+]]:_(s16) = G_SUB [[SMIN2]], [[C2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[TRUNC4]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX8: [[SUB5:%[0-9]+]]:_(s16) = G_SUB [[TRUNC1]], [[SMIN3]]
+ ; GFX8: [[SMAX4:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB6:%[0-9]+]]:_(s16) = G_SUB [[SMAX4]], [[C1]]
+ ; GFX8: [[SMIN4:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB7:%[0-9]+]]:_(s16) = G_SUB [[SMIN4]], [[C2]]
+ ; GFX8: [[SMAX5:%[0-9]+]]:_(s16) = G_SMAX [[SUB6]], [[TRUNC5]]
+ ; GFX8: [[SMIN5:%[0-9]+]]:_(s16) = G_SMIN [[SMAX5]], [[SUB7]]
+ ; GFX8: [[SUB8:%[0-9]+]]:_(s16) = G_SUB [[TRUNC2]], [[SMIN5]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SUB2]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SUB5]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SUB8]](s16)
+ ; GFX8: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[C4]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX8: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX8: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX9-LABEL: name: ssubsat_v3s16
; GFX9: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX9: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX9: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV2]], [[UV5]]
- ; GFX9: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV3]], [[UV6]]
- ; GFX9: [[SSUBSAT2:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV4]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16), [[SSUBSAT2]](s16)
; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX9: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY3]](s32), [[DEF1]](s32)
+ ; GFX9: [[DEF2:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
+ ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX9: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
+ ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[DEF1]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 32767
+ ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
+ ; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[BUILD_VECTOR_TRUNC6:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C3]](s32), [[C3]](s32)
+ ; GFX9: [[SMAX:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC6]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMAX]], [[BUILD_VECTOR_TRUNC4]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC6]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMIN]], [[BUILD_VECTOR_TRUNC5]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC]], [[SMIN1]]
+ ; GFX9: [[BUILD_VECTOR_TRUNC7:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC8:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC9:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C3]](s32), [[C3]](s32)
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[BUILD_VECTOR_TRUNC1]], [[BUILD_VECTOR_TRUNC9]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMAX2]], [[BUILD_VECTOR_TRUNC7]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[BUILD_VECTOR_TRUNC1]], [[BUILD_VECTOR_TRUNC9]]
+ ; GFX9: [[SUB4:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMIN2]], [[BUILD_VECTOR_TRUNC8]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB3]], [[BUILD_VECTOR_TRUNC3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX9: [[SUB5:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC1]], [[SMIN3]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[SUB2]](<2 x s16>), [[SUB5]](<2 x s16>), [[DEF2]](<2 x s16>)
+ ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<6 x s16>), 0
+ ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX9: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
%0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s16>), %2:_(<3 x s16>) = G_UNMERGE_VALUES %0
%3:_(<3 x s16>) = G_SSUBSAT %1, %2
@@ -359,36 +721,180 @@ body: |
; GFX6-LABEL: name: ssubsat_v4s16
; GFX6: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX6: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV]], [[UV4]]
- ; GFX6: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV1]], [[UV5]]
- ; GFX6: [[SSUBSAT2:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV2]], [[UV6]]
- ; GFX6: [[SSUBSAT3:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV3]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16), [[SSUBSAT2]](s16), [[SSUBSAT3]](s16)
- ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX6: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[SHL]], [[C3]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C1]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[SHL]], [[C3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C2]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[SHL1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[SMIN1]]
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s32) = G_ASHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[SHL2]], [[C3]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SMAX2]], [[C1]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[SHL2]], [[C3]]
+ ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SMIN2]], [[C2]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[SHL3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[SMIN3]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s32) = G_ASHR [[SUB5]], [[C]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C]](s32)
+ ; GFX6: [[SMAX4:%[0-9]+]]:_(s32) = G_SMAX [[SHL4]], [[C3]]
+ ; GFX6: [[SUB6:%[0-9]+]]:_(s32) = G_SUB [[SMAX4]], [[C1]]
+ ; GFX6: [[SMIN4:%[0-9]+]]:_(s32) = G_SMIN [[SHL4]], [[C3]]
+ ; GFX6: [[SUB7:%[0-9]+]]:_(s32) = G_SUB [[SMIN4]], [[C2]]
+ ; GFX6: [[SMAX5:%[0-9]+]]:_(s32) = G_SMAX [[SUB6]], [[SHL5]]
+ ; GFX6: [[SMIN5:%[0-9]+]]:_(s32) = G_SMIN [[SMAX5]], [[SUB7]]
+ ; GFX6: [[SUB8:%[0-9]+]]:_(s32) = G_SUB [[SHL4]], [[SMIN5]]
+ ; GFX6: [[ASHR2:%[0-9]+]]:_(s32) = G_ASHR [[SUB8]], [[C]](s32)
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[COPY8]], [[C]](s32)
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[C]](s32)
+ ; GFX6: [[SMAX6:%[0-9]+]]:_(s32) = G_SMAX [[SHL6]], [[C3]]
+ ; GFX6: [[SUB9:%[0-9]+]]:_(s32) = G_SUB [[SMAX6]], [[C1]]
+ ; GFX6: [[SMIN6:%[0-9]+]]:_(s32) = G_SMIN [[SHL6]], [[C3]]
+ ; GFX6: [[SUB10:%[0-9]+]]:_(s32) = G_SUB [[SMIN6]], [[C2]]
+ ; GFX6: [[SMAX7:%[0-9]+]]:_(s32) = G_SMAX [[SUB9]], [[SHL7]]
+ ; GFX6: [[SMIN7:%[0-9]+]]:_(s32) = G_SMIN [[SMAX7]], [[SUB10]]
+ ; GFX6: [[SUB11:%[0-9]+]]:_(s32) = G_SUB [[SHL6]], [[SMIN7]]
+ ; GFX6: [[ASHR3:%[0-9]+]]:_(s32) = G_ASHR [[SUB11]], [[C]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[ASHR]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C4]]
+ ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY [[ASHR1]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C4]]
+ ; GFX6: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL8]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY [[ASHR2]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C4]]
+ ; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY [[ASHR3]](s32)
+ ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C4]]
+ ; GFX6: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL9]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX8-LABEL: name: ssubsat_v4s16
; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX8: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV]], [[UV4]]
- ; GFX8: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV1]], [[UV5]]
- ; GFX8: [[SSUBSAT2:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV2]], [[UV6]]
- ; GFX8: [[SSUBSAT3:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV3]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16), [[SSUBSAT2]](s16), [[SSUBSAT3]](s16)
- ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 32767
+ ; GFX8: [[C2:%[0-9]+]]:_(s16) = G_CONSTANT i16 -32768
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SMAX]], [[C1]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC]], [[C3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SMIN]], [[C2]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s16) = G_SMAX [[SUB]], [[TRUNC4]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s16) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[SMAX2]], [[C1]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC1]], [[C3]]
+ ; GFX8: [[SUB4:%[0-9]+]]:_(s16) = G_SUB [[SMIN2]], [[C2]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s16) = G_SMAX [[SUB3]], [[TRUNC5]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s16) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX8: [[SUB5:%[0-9]+]]:_(s16) = G_SUB [[TRUNC1]], [[SMIN3]]
+ ; GFX8: [[SMAX4:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB6:%[0-9]+]]:_(s16) = G_SUB [[SMAX4]], [[C1]]
+ ; GFX8: [[SMIN4:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC2]], [[C3]]
+ ; GFX8: [[SUB7:%[0-9]+]]:_(s16) = G_SUB [[SMIN4]], [[C2]]
+ ; GFX8: [[SMAX5:%[0-9]+]]:_(s16) = G_SMAX [[SUB6]], [[TRUNC6]]
+ ; GFX8: [[SMIN5:%[0-9]+]]:_(s16) = G_SMIN [[SMAX5]], [[SUB7]]
+ ; GFX8: [[SUB8:%[0-9]+]]:_(s16) = G_SUB [[TRUNC2]], [[SMIN5]]
+ ; GFX8: [[SMAX6:%[0-9]+]]:_(s16) = G_SMAX [[TRUNC3]], [[C3]]
+ ; GFX8: [[SUB9:%[0-9]+]]:_(s16) = G_SUB [[SMAX6]], [[C1]]
+ ; GFX8: [[SMIN6:%[0-9]+]]:_(s16) = G_SMIN [[TRUNC3]], [[C3]]
+ ; GFX8: [[SUB10:%[0-9]+]]:_(s16) = G_SUB [[SMIN6]], [[C2]]
+ ; GFX8: [[SMAX7:%[0-9]+]]:_(s16) = G_SMAX [[SUB9]], [[TRUNC7]]
+ ; GFX8: [[SMIN7:%[0-9]+]]:_(s16) = G_SMIN [[SMAX7]], [[SUB10]]
+ ; GFX8: [[SUB11:%[0-9]+]]:_(s16) = G_SUB [[TRUNC3]], [[SMIN7]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SUB2]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SUB5]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SUB8]](s16)
+ ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[SUB11]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX9-LABEL: name: ssubsat_v4s16
; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV]], [[UV4]]
- ; GFX9: [[SSUBSAT1:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV1]], [[UV5]]
- ; GFX9: [[SSUBSAT2:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV2]], [[UV6]]
- ; GFX9: [[SSUBSAT3:%[0-9]+]]:_(s16) = G_SSUBSAT [[UV3]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[SSUBSAT]](s16), [[SSUBSAT1]](s16), [[SSUBSAT2]](s16), [[SSUBSAT3]](s16)
- ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 32767
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -32768
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[SMAX:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[UV]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMAX]], [[BUILD_VECTOR_TRUNC]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[UV]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMIN]], [[BUILD_VECTOR_TRUNC1]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB]], [[UV2]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(<2 x s16>) = G_SUB [[UV]], [[SMIN1]]
+ ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C2]](s32), [[C2]](s32)
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[UV1]], [[BUILD_VECTOR_TRUNC5]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMAX2]], [[BUILD_VECTOR_TRUNC3]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[UV1]], [[BUILD_VECTOR_TRUNC5]]
+ ; GFX9: [[SUB4:%[0-9]+]]:_(<2 x s16>) = G_SUB [[SMIN2]], [[BUILD_VECTOR_TRUNC4]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(<2 x s16>) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(<2 x s16>) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX9: [[SUB5:%[0-9]+]]:_(<2 x s16>) = G_SUB [[UV1]], [[SMIN3]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[SUB2]](<2 x s16>), [[SUB5]](<2 x s16>)
+ ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_SSUBSAT %0, %1
@@ -404,18 +910,45 @@ body: |
; GFX6-LABEL: name: ssubsat_s32
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s32) = G_SSUBSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0 = COPY [[SSUBSAT]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C1]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[COPY1]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[SMIN1]]
+ ; GFX6: $vgpr0 = COPY [[SUB2]](s32)
; GFX8-LABEL: name: ssubsat_s32
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s32) = G_SSUBSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0 = COPY [[SSUBSAT]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C1]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[COPY1]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[SMIN1]]
+ ; GFX8: $vgpr0 = COPY [[SUB2]](s32)
; GFX9-LABEL: name: ssubsat_s32
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s32) = G_SSUBSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0 = COPY [[SSUBSAT]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[COPY]], [[C2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[COPY]], [[C2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C1]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[COPY1]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[SMIN1]]
+ ; GFX9: $vgpr0 = COPY [[SUB2]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_SSUBSAT %0, %1
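
For reference, the G_SMAX/G_SMIN/G_SUB sequence in the ssubsat_s32 checks above amounts to clamping the right-hand operand to the range in which the subtraction cannot overflow, then doing a plain subtract. A minimal C++ sketch of that shape (illustrative only; the names are mine, not LLVM's):

#include <algorithm>
#include <cstdint>
#include <limits>

int32_t ssubsat_via_minmax(int32_t x, int32_t y) {
  const int32_t Max = std::numeric_limits<int32_t>::max();
  const int32_t Min = std::numeric_limits<int32_t>::min();
  // smax(x, -1) - Max and smin(x, -1) - Min cannot themselves overflow.
  int32_t Lo = std::max(x, -1) - Max; // smallest y with x - y <= Max
  int32_t Hi = std::min(x, -1) - Min; // largest y with x - y >= Min
  int32_t YClamped = std::min(std::max(Lo, y), Hi);
  return x - YClamped;                // in range by construction
}
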
@@ -433,27 +966,72 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s32) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX6: [[SSUBSAT1:%[0-9]+]]:_(s32) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SSUBSAT]](s32), [[SSUBSAT1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[UV]], [[C2]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C]]
+ ; GFX6: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[UV]], [[C2]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C1]]
+ ; GFX6: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[UV2]]
+ ; GFX6: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[SMIN1]]
+ ; GFX6: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[UV1]], [[C2]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SMAX2]], [[C]]
+ ; GFX6: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[UV1]], [[C2]]
+ ; GFX6: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SMIN2]], [[C1]]
+ ; GFX6: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX6: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX6: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[UV1]], [[SMIN3]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SUB2]](s32), [[SUB5]](s32)
; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX8-LABEL: name: ssubsat_v2s32
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s32) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX8: [[SSUBSAT1:%[0-9]+]]:_(s32) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SSUBSAT]](s32), [[SSUBSAT1]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX8: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[UV]], [[C2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C]]
+ ; GFX8: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[UV]], [[C2]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C1]]
+ ; GFX8: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[UV2]]
+ ; GFX8: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[SMIN1]]
+ ; GFX8: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[UV1]], [[C2]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SMAX2]], [[C]]
+ ; GFX8: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[UV1]], [[C2]]
+ ; GFX8: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SMIN2]], [[C1]]
+ ; GFX8: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX8: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX8: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[UV1]], [[SMIN3]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SUB2]](s32), [[SUB5]](s32)
; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX9-LABEL: name: ssubsat_v2s32
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s32) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX9: [[SSUBSAT1:%[0-9]+]]:_(s32) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SSUBSAT]](s32), [[SSUBSAT1]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 2147483647
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -2147483648
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[SMAX:%[0-9]+]]:_(s32) = G_SMAX [[UV]], [[C2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SMAX]], [[C]]
+ ; GFX9: [[SMIN:%[0-9]+]]:_(s32) = G_SMIN [[UV]], [[C2]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SMIN]], [[C1]]
+ ; GFX9: [[SMAX1:%[0-9]+]]:_(s32) = G_SMAX [[SUB]], [[UV2]]
+ ; GFX9: [[SMIN1:%[0-9]+]]:_(s32) = G_SMIN [[SMAX1]], [[SUB1]]
+ ; GFX9: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[SMIN1]]
+ ; GFX9: [[SMAX2:%[0-9]+]]:_(s32) = G_SMAX [[UV1]], [[C2]]
+ ; GFX9: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SMAX2]], [[C]]
+ ; GFX9: [[SMIN2:%[0-9]+]]:_(s32) = G_SMIN [[UV1]], [[C2]]
+ ; GFX9: [[SUB4:%[0-9]+]]:_(s32) = G_SUB [[SMIN2]], [[C1]]
+ ; GFX9: [[SMAX3:%[0-9]+]]:_(s32) = G_SMAX [[SUB3]], [[UV3]]
+ ; GFX9: [[SMIN3:%[0-9]+]]:_(s32) = G_SMIN [[SMAX3]], [[SUB4]]
+ ; GFX9: [[SUB5:%[0-9]+]]:_(s32) = G_SUB [[UV1]], [[SMIN3]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SUB2]](s32), [[SUB5]](s32)
; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
@@ -470,18 +1048,69 @@ body: |
; GFX6-LABEL: name: ssubsat_s64
; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s64) = G_SSUBSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0_vgpr1 = COPY [[SSUBSAT]](s64)
+ ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX6: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
+ ; GFX6: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
+ ; GFX6: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX8-LABEL: name: ssubsat_s64
; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s64) = G_SSUBSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0_vgpr1 = COPY [[SSUBSAT]](s64)
+ ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX8: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
+ ; GFX8: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
+ ; GFX8: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX9-LABEL: name: ssubsat_s64
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s64) = G_SSUBSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0_vgpr1 = COPY [[SSUBSAT]](s64)
+ ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX9: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
+ ; GFX9: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[COPY]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[COPY1]](s64), [[C]]
+ ; GFX9: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX9: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_SSUBSAT %0, %1
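
The s64 case instead goes through a wrapping subtract plus an overflow test, then selects a sign-dependent saturation constant, matching the G_USUBO/G_USUBE, G_ICMP/G_XOR and G_ASHR/G_SELECT lines above. A hedged C++ sketch of the same shape (the helper name is mine, not LLVM's):

#include <cstdint>
#include <limits>

int64_t ssubsat_via_overflow(int64_t x, int64_t y) {
  uint64_t Sub = (uint64_t)x - (uint64_t)y;  // wrapping subtract
  bool Ovf = (y > 0) != ((int64_t)Sub < x);  // signed overflow occurred?
  // The checks form this as ashr(Sub, 63) + 0x8000000000000000: it yields
  // MIN when the wrapped result is non-negative, MAX when it is negative.
  int64_t Sat = ((int64_t)Sub < 0) ? std::numeric_limits<int64_t>::max()
                                   : std::numeric_limits<int64_t>::min();
  return Ovf ? Sat : (int64_t)Sub;
}
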
@@ -499,27 +1128,120 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX6: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX6: [[SSUBSAT:%[0-9]+]]:_(s64) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX6: [[SSUBSAT1:%[0-9]+]]:_(s64) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SSUBSAT]](s64), [[SSUBSAT1]](s64)
+ ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX6: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
+ ; GFX6: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
+ ; GFX6: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX6: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX6: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
+ ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX6: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX6: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX6: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
+ ; GFX6: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV13]], [[UV15]], [[USUBO3]]
+ ; GFX6: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
+ ; GFX6: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
+ ; GFX6: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
+ ; GFX6: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
+ ; GFX6: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX6: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
+ ; GFX6: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX6: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
+ ; GFX6: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
+ ; GFX6: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX8-LABEL: name: ssubsat_v2s64
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX8: [[SSUBSAT:%[0-9]+]]:_(s64) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX8: [[SSUBSAT1:%[0-9]+]]:_(s64) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SSUBSAT]](s64), [[SSUBSAT1]](s64)
+ ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX8: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
+ ; GFX8: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
+ ; GFX8: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX8: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX8: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
+ ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX8: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX8: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX8: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
+ ; GFX8: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV13]], [[UV15]], [[USUBO3]]
+ ; GFX8: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
+ ; GFX8: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
+ ; GFX8: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
+ ; GFX8: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
+ ; GFX8: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX8: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
+ ; GFX8: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
+ ; GFX8: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
+ ; GFX8: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX9-LABEL: name: ssubsat_v2s64
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9: [[SSUBSAT:%[0-9]+]]:_(s64) = G_SSUBSAT [[UV]], [[UV2]]
- ; GFX9: [[SSUBSAT1:%[0-9]+]]:_(s64) = G_SSUBSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SSUBSAT]](s64), [[SSUBSAT1]](s64)
+ ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX9: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
+ ; GFX9: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV]](s64), [[UV]]
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV2]](s64), [[C]]
+ ; GFX9: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[ICMP1]], [[ICMP]]
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 63
+ ; GFX9: [[ASHR:%[0-9]+]]:_(s64) = G_ASHR [[MV]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 -9223372036854775808
+ ; GFX9: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR]](s64)
+ ; GFX9: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX9: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO1]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[XOR]](s1), [[MV1]], [[MV]]
+ ; GFX9: [[UV12:%[0-9]+]]:_(s32), [[UV13:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX9: [[UV14:%[0-9]+]]:_(s32), [[UV15:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX9: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV12]], [[UV14]]
+ ; GFX9: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV13]], [[UV15]], [[USUBO3]]
+ ; GFX9: [[MV2:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
+ ; GFX9: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(slt), [[MV2]](s64), [[UV1]]
+ ; GFX9: [[ICMP3:%[0-9]+]]:_(s1) = G_ICMP intpred(sgt), [[UV3]](s64), [[C]]
+ ; GFX9: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[ICMP3]], [[ICMP2]]
+ ; GFX9: [[ASHR1:%[0-9]+]]:_(s64) = G_ASHR [[MV2]], [[C1]](s32)
+ ; GFX9: [[UV16:%[0-9]+]]:_(s32), [[UV17:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[ASHR1]](s64)
+ ; GFX9: [[UV18:%[0-9]+]]:_(s32), [[UV19:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[C2]](s64)
+ ; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV16]], [[UV18]]
+ ; GFX9: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV17]], [[UV19]], [[UADDO3]]
+ ; GFX9: [[MV3:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[XOR1]](s1), [[MV3]], [[MV2]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uaddsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uaddsat.mir
index e080bde81b3a..690bf34482dd 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uaddsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-uaddsat.mir
@@ -12,51 +12,48 @@ body: |
; GFX6-LABEL: name: uaddsat_s7
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDSAT]](s16)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s7) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s7)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: uaddsat_s7
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[UADDSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[LSHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[SHL]], [[C1]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[ADD]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: uaddsat_s7
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[UADDSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[LSHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[SHL]], [[C1]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[ADD]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s7) = G_TRUNC %0
@@ -75,51 +72,48 @@ body: |
; GFX6-LABEL: name: uaddsat_s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDSAT]](s16)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s8)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: uaddsat_s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[UADDSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[SHL]], [[C1]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[ADD]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: uaddsat_s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[UADDSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[SHL]], [[C1]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[ADD]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s8) = G_TRUNC %0
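
The s7/s8 cases above widen first: both operands are shifted into the top bits of the register so the narrow type's saturation boundary coincides with the wide type's, the wide saturating add runs, and the result is shifted back down. A small C++ sketch of the GFX6 s8-in-32-bit shape (names are mine):

#include <cstdint>

uint8_t uaddsat8_via_u32(uint8_t a, uint8_t b) {
  uint32_t X = (uint32_t)a << 24, Y = (uint32_t)b << 24;
  uint32_t Room = ~X;                       // headroom before wrap (xor -1)
  uint32_t Sum = X + (Y < Room ? Y : Room); // x + umin(~x, y)
  return (uint8_t)(Sum >> 24);
}
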
@@ -138,75 +132,122 @@ body: |
; GFX6-LABEL: name: uaddsat_v2s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX6: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX6: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s8) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[UADDSAT1]](s16)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[UADDSAT]](s8), [[TRUNC3]](s8)
- ; GFX6: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX6: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C2]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C2]](s32)
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C3]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C2]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C2]](s32)
+ ; GFX6: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[SHL2]], [[C3]]
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[XOR1]], [[SHL3]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[ADD1]], [[C2]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C4]]
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX6: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C5]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY6]](s32)
+ ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL4]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
+ ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX8-LABEL: name: uaddsat_v2s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX8: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX8: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s8) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[UADDSAT1]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[UADDSAT]](s8), [[TRUNC2]](s8)
- ; GFX8: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX8: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[SHL]], [[C4]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX8: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[ADD]], [[C3]](s16)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX8: [[XOR1:%[0-9]+]]:_(s16) = G_XOR [[SHL2]], [[C4]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[XOR1]], [[SHL3]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[SHL2]], [[UMIN1]]
+ ; GFX8: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[ADD1]], [[C3]](s16)
+ ; GFX8: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX8: [[COPY2:%[0-9]+]]:_(s16) = COPY [[LSHR6]](s16)
+ ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C5]]
+ ; GFX8: [[COPY3:%[0-9]+]]:_(s16) = COPY [[LSHR7]](s16)
+ ; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C5]]
+ ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: uaddsat_v2s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX9: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX9: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s8) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[SHL]], [[SHL1]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[UADDSAT1]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[UADDSAT]](s8), [[TRUNC2]](s8)
- ; GFX9: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX9: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[SHL]], [[C4]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX9: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[ADD]], [[C3]](s16)
+ ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX9: [[XOR1:%[0-9]+]]:_(s16) = G_XOR [[SHL2]], [[C4]]
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[XOR1]], [[SHL3]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[SHL2]], [[UMIN1]]
+ ; GFX9: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[ADD1]], [[C3]](s16)
+ ; GFX9: [[C5:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s16) = COPY [[LSHR6]](s16)
+ ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C5]]
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s16) = COPY [[LSHR7]](s16)
+ ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C5]]
+ ; GFX9: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
@@ -228,26 +269,39 @@ body: |
; GFX6-LABEL: name: uaddsat_s16
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[TRUNC]], [[TRUNC1]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDSAT]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: uaddsat_s16
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[TRUNC]], [[TRUNC1]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDSAT]](s16)
+ ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[TRUNC]], [[C]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[TRUNC1]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[UMIN]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s16)
; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: uaddsat_s16
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[TRUNC]], [[TRUNC1]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[UADDSAT]](s16)
+ ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX9: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[TRUNC]], [[C]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[TRUNC1]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[UMIN]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[ADD]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
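
At the native width the unsigned lowering reduces to the identity uaddsat(x, y) = x + umin(~x, y): ~x is exactly the headroom left before wrap-around, so the clamped add can never overflow. This is the G_XOR/G_UMIN/G_ADD triple in the checks above. Sketch, assuming 16-bit unsigned wraparound semantics:

#include <cstdint>

uint16_t uaddsat16(uint16_t x, uint16_t y) {
  uint16_t Room = (uint16_t)~x;                 // UINT16_MAX - x
  return (uint16_t)(x + (y < Room ? y : Room)); // x + umin(~x, y)
}
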
@@ -267,30 +321,71 @@ body: |
; GFX6-LABEL: name: uaddsat_v2s16
; GFX6: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX6: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16)
- ; GFX6: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[SHL2]], [[C1]]
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[XOR1]], [[SHL3]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[ADD1]], [[C]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C2]]
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C2]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL4]]
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX8-LABEL: name: uaddsat_v2s16
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX8: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16)
- ; GFX8: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[TRUNC]], [[C1]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[TRUNC2]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[UMIN]]
+ ; GFX8: [[XOR1:%[0-9]+]]:_(s16) = G_XOR [[TRUNC1]], [[C1]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[XOR1]], [[TRUNC3]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[TRUNC1]], [[UMIN1]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ADD]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ADD1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX9-LABEL: name: uaddsat_v2s16
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX9: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16)
- ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[XOR:%[0-9]+]]:_(<2 x s16>) = G_XOR [[COPY]], [[BUILD_VECTOR_TRUNC]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[XOR]], [[COPY1]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(<2 x s16>) = G_ADD [[COPY]], [[UMIN]]
+ ; GFX9: $vgpr0 = COPY [[ADD]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_UADDSAT %0, %1
@@ -306,42 +401,155 @@ body: |
; GFX6-LABEL: name: uaddsat_v3s16
; GFX6: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX6: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX6: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV2]], [[UV5]]
- ; GFX6: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV3]], [[UV6]]
- ; GFX6: [[UADDSAT2:%[0-9]+]]:_(s16) = G_UADDSAT [[UV4]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16), [[UADDSAT2]](s16)
; GFX6: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX6: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX6: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[SHL2]], [[C1]]
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[XOR1]], [[SHL3]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[ADD1]], [[C]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[SHL4]], [[C1]]
+ ; GFX6: [[UMIN2:%[0-9]+]]:_(s32) = G_UMIN [[XOR2]], [[SHL5]]
+ ; GFX6: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[SHL4]], [[UMIN2]]
+ ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C2]]
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C2]]
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL6]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C2]]
+ ; GFX6: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[C3]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL7]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX6: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX6: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX8-LABEL: name: uaddsat_v3s16
; GFX8: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX8: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX8: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV2]], [[UV5]]
- ; GFX8: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV3]], [[UV6]]
- ; GFX8: [[UADDSAT2:%[0-9]+]]:_(s16) = G_UADDSAT [[UV4]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16), [[UADDSAT2]](s16)
; GFX8: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX8: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX8: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[TRUNC]], [[C1]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[TRUNC3]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[UMIN]]
+ ; GFX8: [[XOR1:%[0-9]+]]:_(s16) = G_XOR [[TRUNC1]], [[C1]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[XOR1]], [[TRUNC4]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[TRUNC1]], [[UMIN1]]
+ ; GFX8: [[XOR2:%[0-9]+]]:_(s16) = G_XOR [[TRUNC2]], [[C1]]
+ ; GFX8: [[UMIN2:%[0-9]+]]:_(s16) = G_UMIN [[XOR2]], [[TRUNC5]]
+ ; GFX8: [[ADD2:%[0-9]+]]:_(s16) = G_ADD [[TRUNC2]], [[UMIN2]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ADD]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ADD1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ADD2]](s16)
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[C2]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX8: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX8: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX9-LABEL: name: uaddsat_v3s16
; GFX9: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX9: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX9: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV2]], [[UV5]]
- ; GFX9: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV3]], [[UV6]]
- ; GFX9: [[UADDSAT2:%[0-9]+]]:_(s16) = G_UADDSAT [[UV4]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16), [[UADDSAT2]](s16)
; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX9: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY3]](s32), [[DEF1]](s32)
+ ; GFX9: [[DEF2:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
+ ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX9: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
+ ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[DEF1]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[BUILD_VECTOR_TRUNC4:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[XOR:%[0-9]+]]:_(<2 x s16>) = G_XOR [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC4]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[XOR]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(<2 x s16>) = G_ADD [[BUILD_VECTOR_TRUNC]], [[UMIN]]
+ ; GFX9: [[BUILD_VECTOR_TRUNC5:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C1]](s32), [[C1]](s32)
+ ; GFX9: [[XOR1:%[0-9]+]]:_(<2 x s16>) = G_XOR [[BUILD_VECTOR_TRUNC1]], [[BUILD_VECTOR_TRUNC5]]
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[XOR1]], [[BUILD_VECTOR_TRUNC3]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(<2 x s16>) = G_ADD [[BUILD_VECTOR_TRUNC1]], [[UMIN1]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[ADD]](<2 x s16>), [[ADD1]](<2 x s16>), [[DEF2]](<2 x s16>)
+ ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<6 x s16>), 0
+ ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX9: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
%0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s16>), %2:_(<3 x s16>) = G_UNMERGE_VALUES %0
%3:_(<3 x s16>) = G_UADDSAT %1, %2
@@ -359,36 +567,130 @@ body: |
; GFX6-LABEL: name: uaddsat_v4s16
; GFX6: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX6: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV]], [[UV4]]
- ; GFX6: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV1]], [[UV5]]
- ; GFX6: [[UADDSAT2:%[0-9]+]]:_(s16) = G_UADDSAT [[UV2]], [[UV6]]
- ; GFX6: [[UADDSAT3:%[0-9]+]]:_(s16) = G_UADDSAT [[UV3]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16), [[UADDSAT2]](s16), [[UADDSAT3]](s16)
- ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX6: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[SHL]], [[C1]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[SHL1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[ADD]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[SHL2]], [[C1]]
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[XOR1]], [[SHL3]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[ADD1]], [[C]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C]](s32)
+ ; GFX6: [[XOR2:%[0-9]+]]:_(s32) = G_XOR [[SHL4]], [[C1]]
+ ; GFX6: [[UMIN2:%[0-9]+]]:_(s32) = G_UMIN [[XOR2]], [[SHL5]]
+ ; GFX6: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[SHL4]], [[UMIN2]]
+ ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[ADD2]], [[C]](s32)
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[COPY8]], [[C]](s32)
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[C]](s32)
+ ; GFX6: [[XOR3:%[0-9]+]]:_(s32) = G_XOR [[SHL6]], [[C1]]
+ ; GFX6: [[UMIN3:%[0-9]+]]:_(s32) = G_UMIN [[XOR3]], [[SHL7]]
+ ; GFX6: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[SHL6]], [[UMIN3]]
+ ; GFX6: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[ADD3]], [[C]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C2]]
+ ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C2]]
+ ; GFX6: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL8]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C2]]
+ ; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
+ ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C2]]
+ ; GFX6: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL9]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX8-LABEL: name: uaddsat_v4s16
; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX8: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV]], [[UV4]]
- ; GFX8: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV1]], [[UV5]]
- ; GFX8: [[UADDSAT2:%[0-9]+]]:_(s16) = G_UADDSAT [[UV2]], [[UV6]]
- ; GFX8: [[UADDSAT3:%[0-9]+]]:_(s16) = G_UADDSAT [[UV3]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16), [[UADDSAT2]](s16), [[UADDSAT3]](s16)
- ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s16) = G_CONSTANT i16 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s16) = G_XOR [[TRUNC]], [[C1]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[XOR]], [[TRUNC4]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s16) = G_ADD [[TRUNC]], [[UMIN]]
+ ; GFX8: [[XOR1:%[0-9]+]]:_(s16) = G_XOR [[TRUNC1]], [[C1]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[XOR1]], [[TRUNC5]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s16) = G_ADD [[TRUNC1]], [[UMIN1]]
+ ; GFX8: [[XOR2:%[0-9]+]]:_(s16) = G_XOR [[TRUNC2]], [[C1]]
+ ; GFX8: [[UMIN2:%[0-9]+]]:_(s16) = G_UMIN [[XOR2]], [[TRUNC6]]
+ ; GFX8: [[ADD2:%[0-9]+]]:_(s16) = G_ADD [[TRUNC2]], [[UMIN2]]
+ ; GFX8: [[XOR3:%[0-9]+]]:_(s16) = G_XOR [[TRUNC3]], [[C1]]
+ ; GFX8: [[UMIN3:%[0-9]+]]:_(s16) = G_UMIN [[XOR3]], [[TRUNC7]]
+ ; GFX8: [[ADD3:%[0-9]+]]:_(s16) = G_ADD [[TRUNC3]], [[UMIN3]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[ADD]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[ADD1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ADD2]](s16)
+ ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[ADD3]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX9-LABEL: name: uaddsat_v4s16
; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s16) = G_UADDSAT [[UV]], [[UV4]]
- ; GFX9: [[UADDSAT1:%[0-9]+]]:_(s16) = G_UADDSAT [[UV1]], [[UV5]]
- ; GFX9: [[UADDSAT2:%[0-9]+]]:_(s16) = G_UADDSAT [[UV2]], [[UV6]]
- ; GFX9: [[UADDSAT3:%[0-9]+]]:_(s16) = G_UADDSAT [[UV3]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[UADDSAT]](s16), [[UADDSAT1]](s16), [[UADDSAT2]](s16), [[UADDSAT3]](s16)
- ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[XOR:%[0-9]+]]:_(<2 x s16>) = G_XOR [[UV]], [[BUILD_VECTOR_TRUNC]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[XOR]], [[UV2]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(<2 x s16>) = G_ADD [[UV]], [[UMIN]]
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[C]](s32), [[C]](s32)
+ ; GFX9: [[XOR1:%[0-9]+]]:_(<2 x s16>) = G_XOR [[UV1]], [[BUILD_VECTOR_TRUNC1]]
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[XOR1]], [[UV3]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(<2 x s16>) = G_ADD [[UV1]], [[UMIN1]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[ADD]](<2 x s16>), [[ADD1]](<2 x s16>)
+ ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_UADDSAT %0, %1
@@ -404,18 +706,27 @@ body: |
; GFX6-LABEL: name: uaddsat_s32
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s32) = G_UADDSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0 = COPY [[UADDSAT]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[C]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[COPY1]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[UMIN]]
+ ; GFX6: $vgpr0 = COPY [[ADD]](s32)
; GFX8-LABEL: name: uaddsat_s32
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s32) = G_UADDSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0 = COPY [[UADDSAT]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[C]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[COPY1]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[UMIN]]
+ ; GFX8: $vgpr0 = COPY [[ADD]](s32)
; GFX9-LABEL: name: uaddsat_s32
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s32) = G_UADDSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0 = COPY [[UADDSAT]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[COPY]], [[C]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[COPY1]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[UMIN]]
+ ; GFX9: $vgpr0 = COPY [[ADD]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_UADDSAT %0, %1
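
The min/max lowering that the uaddsat_s32 checks above encode reduces to one identity: ~a is the unsigned headroom left above a, so clamping b to it makes the add saturate. A minimal C++ sketch of that identity (uaddsat32 is an illustrative name, not an LLVM API):

  #include <algorithm>
  #include <cstdint>

  uint32_t uaddsat32(uint32_t a, uint32_t b) {
    // ~a == UINT32_MAX - a, the headroom above a (the G_XOR with -1).
    // Clamping b to that headroom (G_UMIN) makes the G_ADD saturate
    // exactly at UINT32_MAX instead of wrapping.
    return a + std::min(~a, b);
  }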
@@ -433,27 +744,42 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s32) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX6: [[UADDSAT1:%[0-9]+]]:_(s32) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UADDSAT]](s32), [[UADDSAT1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX6: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[C]]
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[UV2]]
+ ; GFX6: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV]], [[UMIN]]
+ ; GFX6: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV1]], [[C]]
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[XOR1]], [[UV3]]
+ ; GFX6: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UV1]], [[UMIN1]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ADD]](s32), [[ADD1]](s32)
; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX8-LABEL: name: uaddsat_v2s32
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s32) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX8: [[UADDSAT1:%[0-9]+]]:_(s32) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UADDSAT]](s32), [[UADDSAT1]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX8: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[C]]
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[UV2]]
+ ; GFX8: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV]], [[UMIN]]
+ ; GFX8: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV1]], [[C]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[XOR1]], [[UV3]]
+ ; GFX8: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UV1]], [[UMIN1]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ADD]](s32), [[ADD1]](s32)
; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX9-LABEL: name: uaddsat_v2s32
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s32) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX9: [[UADDSAT1:%[0-9]+]]:_(s32) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[UADDSAT]](s32), [[UADDSAT1]](s32)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
+ ; GFX9: [[XOR:%[0-9]+]]:_(s32) = G_XOR [[UV]], [[C]]
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[XOR]], [[UV2]]
+ ; GFX9: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[UV]], [[UMIN]]
+ ; GFX9: [[XOR1:%[0-9]+]]:_(s32) = G_XOR [[UV1]], [[C]]
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[XOR1]], [[UV3]]
+ ; GFX9: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[UV1]], [[UMIN1]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[ADD]](s32), [[ADD1]](s32)
; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
@@ -470,18 +796,39 @@ body: |
; GFX6-LABEL: name: uaddsat_s64
; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s64) = G_UADDSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0_vgpr1 = COPY [[UADDSAT]](s64)
+ ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
+ ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV]](s64), [[COPY1]]
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX6: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX8-LABEL: name: uaddsat_s64
; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s64) = G_UADDSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0_vgpr1 = COPY [[UADDSAT]](s64)
+ ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
+ ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV]](s64), [[COPY1]]
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX8: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX9-LABEL: name: uaddsat_s64
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s64) = G_UADDSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0_vgpr1 = COPY [[UADDSAT]](s64)
+ ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV]], [[UV2]]
+ ; GFX9: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV1]], [[UV3]], [[UADDO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV]](s64), [[COPY1]]
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX9: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_UADDSAT %0, %1
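
The s64 case uses the add-with-overflow form instead: the 64-bit add is split into G_UADDO/G_UADDE on the s32 halves, and the all-ones constant is selected when the wrapped sum compares unsigned-less-than the RHS. A sketch of the same logic at the C level (relying on C's wrapping unsigned arithmetic; the name is illustrative):

  #include <cstdint>

  uint64_t uaddsat64(uint64_t a, uint64_t b) {
    uint64_t sum = a + b; // G_UADDO + G_UADDE across the two s32 halves
    // Unsigned overflow occurred iff the wrapped sum is below an operand;
    // the MIR's G_ICMP ult tests against the RHS, then G_SELECTs -1.
    return sum < b ? UINT64_MAX : sum;
  }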
@@ -499,27 +846,66 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX6: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX6: [[UADDSAT:%[0-9]+]]:_(s64) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX6: [[UADDSAT1:%[0-9]+]]:_(s64) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[UADDSAT]](s64), [[UADDSAT1]](s64)
+ ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX6: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX6: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV]](s64), [[UV2]]
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX6: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX6: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX6: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
+ ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV1]](s64), [[UV3]]
+ ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[MV1]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX8-LABEL: name: uaddsat_v2s64
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX8: [[UADDSAT:%[0-9]+]]:_(s64) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX8: [[UADDSAT1:%[0-9]+]]:_(s64) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[UADDSAT]](s64), [[UADDSAT1]](s64)
+ ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX8: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX8: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV]](s64), [[UV2]]
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX8: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX8: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX8: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
+ ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV1]](s64), [[UV3]]
+ ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[MV1]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX9-LABEL: name: uaddsat_v2s64
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9: [[UADDSAT:%[0-9]+]]:_(s64) = G_UADDSAT [[UV]], [[UV2]]
- ; GFX9: [[UADDSAT1:%[0-9]+]]:_(s64) = G_UADDSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[UADDSAT]](s64), [[UADDSAT1]](s64)
+ ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX9: [[UADDO:%[0-9]+]]:_(s32), [[UADDO1:%[0-9]+]]:_(s1) = G_UADDO [[UV4]], [[UV6]]
+ ; GFX9: [[UADDE:%[0-9]+]]:_(s32), [[UADDE1:%[0-9]+]]:_(s1) = G_UADDE [[UV5]], [[UV7]], [[UADDO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO]](s32), [[UADDE]](s32)
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV]](s64), [[UV2]]
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -1
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX9: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX9: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX9: [[UADDO2:%[0-9]+]]:_(s32), [[UADDO3:%[0-9]+]]:_(s1) = G_UADDO [[UV8]], [[UV10]]
+ ; GFX9: [[UADDE2:%[0-9]+]]:_(s32), [[UADDE3:%[0-9]+]]:_(s1) = G_UADDE [[UV9]], [[UV11]], [[UADDO3]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[UADDO2]](s32), [[UADDE2]](s32)
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[MV1]](s64), [[UV3]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[MV1]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-usubsat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-usubsat.mir
index 56a1f1baded5..356bb38456ea 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-usubsat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-usubsat.mir
@@ -12,51 +12,42 @@ body: |
; GFX6-LABEL: name: usubsat_s7
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[USUBSAT]](s16)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s7) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s7)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: usubsat_s7
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[USUBSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[LSHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SUB]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: usubsat_s7
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s7) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s7) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s7)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s7)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 9
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[USUBSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s7) = G_TRUNC [[LSHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s7)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SUB]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s7) = G_TRUNC %0
@@ -75,51 +66,42 @@ body: |
; GFX6-LABEL: name: usubsat_s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[USUBSAT]](s16)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC3]](s8)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: usubsat_s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX8: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[USUBSAT]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SUB]], [[C]](s16)
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: usubsat_s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[TRUNC:%[0-9]+]]:_(s8) = G_TRUNC [[COPY]](s32)
- ; GFX9: [[TRUNC1:%[0-9]+]]:_(s8) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[TRUNC1]](s8)
+ ; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
+ ; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[USUBSAT]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[TRUNC2]](s8)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C]](s16)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[SUB]], [[C]](s16)
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[LSHR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s8) = G_TRUNC %0
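
Unsigned saturating subtract lowers to a - umin(a, b), which can never wrap below zero. For sub-word types such as s7 and s8 the checks above first shift the operands into the top bits of a legal width (by 25 or 24 into an s32 on GFX6, by 9 or 8 into an s16 on GFX8/9), clamp there, and shift the result back down, so saturating at the wide width coincides with saturating the narrow type. A sketch of the s8-in-s32 variant (names are illustrative, not LLVM APIs):

  #include <algorithm>
  #include <cstdint>

  uint8_t usubsat8_via32(uint8_t a, uint8_t b) {
    uint32_t sa = uint32_t(a) << 24, sb = uint32_t(b) << 24; // G_SHL by 24
    uint32_t d = sa - std::min(sa, sb);                      // G_UMIN, G_SUB
    return uint8_t(d >> 24);                                 // G_LSHR by 24
  }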
@@ -138,75 +120,113 @@ body: |
; GFX6-LABEL: name: usubsat_v2s8
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX6: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX6: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s8) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX6: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX6: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX6: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX6: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX6: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX6: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[C]](s16)
- ; GFX6: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[USUBSAT1]](s16)
- ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[ZEXT1]], [[ZEXT]](s32)
- ; GFX6: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
- ; GFX6: [[TRUNC3:%[0-9]+]]:_(s8) = G_TRUNC [[TRUNC2]](s16)
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[USUBSAT]](s8), [[TRUNC3]](s8)
- ; GFX6: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX6: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX6: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C2]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C2]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C2]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C2]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C2]](s32)
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[SHL2]], [[SHL3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C2]](s32)
+ ; GFX6: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR6]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s16) = G_AND [[TRUNC]], [[C3]]
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[C]](s32)
+ ; GFX6: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 255
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C4]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[COPY6]](s32)
+ ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[SHL4]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[TRUNC1]]
+ ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX8-LABEL: name: usubsat_v2s8
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX8: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX8: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX8: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX8: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX8: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s8) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX8: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX8: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX8: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX8: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[USUBSAT1]], [[C]](s16)
- ; GFX8: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[USUBSAT]](s8), [[TRUNC2]](s8)
- ; GFX8: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX8: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX8: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX8: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX8: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX8: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX8: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SUB]], [[C3]](s16)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX8: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[SHL2]], [[SHL3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SHL2]], [[UMIN1]]
+ ; GFX8: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[SUB1]], [[C3]](s16)
+ ; GFX8: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX8: [[COPY2:%[0-9]+]]:_(s16) = COPY [[LSHR6]](s16)
+ ; GFX8: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C4]]
+ ; GFX8: [[COPY3:%[0-9]+]]:_(s16) = COPY [[LSHR7]](s16)
+ ; GFX8: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C4]]
+ ; GFX8: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX8: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: usubsat_v2s8
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 8
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
+ ; GFX9: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C1]](s32)
+ ; GFX9: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 24
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C2]](s32)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C]](s32)
+ ; GFX9: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C1]](s32)
+ ; GFX9: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[COPY1]], [[C2]](s32)
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[BITCAST:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC]](s16)
- ; GFX9: [[BITCAST1:%[0-9]+]]:_(<2 x s8>) = G_BITCAST [[TRUNC1]](s16)
- ; GFX9: [[UV:%[0-9]+]]:_(s8), [[UV1:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST]](<2 x s8>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s8), [[UV3:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BITCAST1]](<2 x s8>)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s8) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s16) = G_ANYEXT [[UV1]](s8)
- ; GFX9: [[ANYEXT1:%[0-9]+]]:_(s16) = G_ANYEXT [[UV3]](s8)
- ; GFX9: [[C:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
- ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT]], [[C]](s16)
- ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[ANYEXT1]], [[C]](s16)
- ; GFX9: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[SHL]], [[SHL1]]
- ; GFX9: [[LSHR:%[0-9]+]]:_(s16) = G_LSHR [[USUBSAT1]], [[C]](s16)
- ; GFX9: [[TRUNC2:%[0-9]+]]:_(s8) = G_TRUNC [[LSHR]](s16)
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s8>) = G_BUILD_VECTOR [[USUBSAT]](s8), [[TRUNC2]](s8)
- ; GFX9: [[UV4:%[0-9]+]]:_(s8), [[UV5:%[0-9]+]]:_(s8) = G_UNMERGE_VALUES [[BUILD_VECTOR]](<2 x s8>)
- ; GFX9: [[MV:%[0-9]+]]:_(s16) = G_MERGE_VALUES [[UV4]](s8), [[UV5]](s8)
- ; GFX9: [[ANYEXT2:%[0-9]+]]:_(s32) = G_ANYEXT [[MV]](s16)
- ; GFX9: $vgpr0 = COPY [[ANYEXT2]](s32)
+ ; GFX9: [[C3:%[0-9]+]]:_(s16) = G_CONSTANT i16 8
+ ; GFX9: [[SHL:%[0-9]+]]:_(s16) = G_SHL [[TRUNC]], [[C3]](s16)
+ ; GFX9: [[SHL1:%[0-9]+]]:_(s16) = G_SHL [[TRUNC1]], [[C3]](s16)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX9: [[LSHR6:%[0-9]+]]:_(s16) = G_LSHR [[SUB]], [[C3]](s16)
+ ; GFX9: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX9: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX9: [[SHL2:%[0-9]+]]:_(s16) = G_SHL [[TRUNC2]], [[C3]](s16)
+ ; GFX9: [[SHL3:%[0-9]+]]:_(s16) = G_SHL [[TRUNC3]], [[C3]](s16)
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[SHL2]], [[SHL3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[SHL2]], [[UMIN1]]
+ ; GFX9: [[LSHR7:%[0-9]+]]:_(s16) = G_LSHR [[SUB1]], [[C3]](s16)
+ ; GFX9: [[C4:%[0-9]+]]:_(s16) = G_CONSTANT i16 255
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s16) = COPY [[LSHR6]](s16)
+ ; GFX9: [[AND:%[0-9]+]]:_(s16) = G_AND [[COPY2]], [[C4]]
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s16) = COPY [[LSHR7]](s16)
+ ; GFX9: [[AND1:%[0-9]+]]:_(s16) = G_AND [[COPY3]], [[C4]]
+ ; GFX9: [[SHL4:%[0-9]+]]:_(s16) = G_SHL [[AND1]], [[C3]](s16)
+ ; GFX9: [[OR:%[0-9]+]]:_(s16) = G_OR [[AND]], [[SHL4]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[OR]](s16)
+ ; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s16) = G_TRUNC %0
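
For v2s8 the two lanes packed in the low 16 bits are saturated independently and then re-packed, mirroring the G_AND 255 / G_SHL 8 / G_OR tail of the checks above. A self-contained sketch under the same assumptions (illustrative names only):

  #include <algorithm>
  #include <cstdint>

  static uint8_t usubsat8(uint8_t a, uint8_t b) {
    return uint8_t(a - std::min(a, b)); // per-lane G_UMIN then G_SUB
  }

  uint16_t usubsat_v2s8(uint16_t a, uint16_t b) {
    uint8_t lo = usubsat8(uint8_t(a), uint8_t(b));
    uint8_t hi = usubsat8(uint8_t(a >> 8), uint8_t(b >> 8));
    return uint16_t(lo | (hi << 8)); // AND/SHL/OR repack of the lanes
  }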
@@ -228,26 +248,33 @@ body: |
; GFX6-LABEL: name: usubsat_s16
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
- ; GFX6: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[TRUNC]], [[TRUNC1]]
- ; GFX6: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[USUBSAT]](s16)
- ; GFX6: $vgpr0 = COPY [[ANYEXT]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[COPY]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[COPY1]](s32)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: $vgpr0 = COPY [[COPY4]](s32)
; GFX8-LABEL: name: usubsat_s16
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[TRUNC]], [[TRUNC1]]
- ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[USUBSAT]](s16)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC]], [[TRUNC1]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[UMIN]]
+ ; GFX8: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUB]](s16)
; GFX8: $vgpr0 = COPY [[ANYEXT]](s32)
; GFX9-LABEL: name: usubsat_s16
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
; GFX9: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[COPY]](s32)
; GFX9: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[COPY1]](s32)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[TRUNC]], [[TRUNC1]]
- ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[USUBSAT]](s16)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC]], [[TRUNC1]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[UMIN]]
+ ; GFX9: [[ANYEXT:%[0-9]+]]:_(s32) = G_ANYEXT [[SUB]](s16)
; GFX9: $vgpr0 = COPY [[ANYEXT]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
@@ -267,30 +294,62 @@ body: |
; GFX6-LABEL: name: usubsat_v2s16
; GFX6: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX6: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16)
- ; GFX6: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[SHL2]], [[SHL3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY6]], [[C1]]
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]]
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL4]]
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX8-LABEL: name: usubsat_v2s16
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX8: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16)
- ; GFX8: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[COPY1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC]], [[TRUNC2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[UMIN]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC1]], [[TRUNC3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[TRUNC1]], [[UMIN1]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SUB]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SUB1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: $vgpr0 = COPY [[BITCAST2]](<2 x s16>)
; GFX9-LABEL: name: usubsat_v2s16
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s16>) = COPY $vgpr1
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<2 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<2 x s16>)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX9: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16)
- ; GFX9: $vgpr0 = COPY [[BUILD_VECTOR]](<2 x s16>)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[COPY]], [[COPY1]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[COPY]], [[UMIN]]
+ ; GFX9: $vgpr0 = COPY [[SUB]](<2 x s16>)
%0:_(<2 x s16>) = COPY $vgpr0
%1:_(<2 x s16>) = COPY $vgpr1
%2:_(<2 x s16>) = G_USUBSAT %0, %1
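The three prefixes diverge here because the lowering is applied at whatever type the target's min/max support allows: gfx9 has packed 16-bit min, so the expansion stays in <2 x s16> form, while gfx6/gfx8 first break the vector apart (and gfx6, with no 16-bit ops at all, works in s32). A hypothetical LegalizeRuleSet sketch of that shape, assuming a gfx9-like target; this is illustrative only and not the actual AMDGPULegalizerInfo rules:

using namespace TargetOpcode;
const LLT S16 = LLT::scalar(16);
const LLT V2S16 = LLT::vector(2, 16);

getActionDefinitionsBuilder({G_SADDSAT, G_SSUBSAT, G_UADDSAT, G_USUBSAT})
    .clampMaxNumElements(0, S16, 2) // split anything wider than <2 x s16>
    .minScalar(0, S16)              // widen sub-16-bit scalars first
    .lower();                       // then expand via min/max or addo/subo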
@@ -306,42 +365,142 @@ body: |
; GFX6-LABEL: name: usubsat_v3s16
; GFX6: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX6: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX6: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX6: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV2]], [[UV5]]
- ; GFX6: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV3]], [[UV6]]
- ; GFX6: [[USUBSAT2:%[0-9]+]]:_(s16) = G_USUBSAT [[UV4]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16), [[USUBSAT2]](s16)
; GFX6: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX6: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX6: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY1]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[SHL2]], [[SHL3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[UMIN2:%[0-9]+]]:_(s32) = G_UMIN [[SHL4]], [[SHL5]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL4]], [[UMIN2]]
+ ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY7]], [[C1]]
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY8]], [[C1]]
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL6]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY9]], [[C1]]
+ ; GFX6: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[C2]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL7]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX6: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX6: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX6: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX8-LABEL: name: usubsat_v3s16
; GFX8: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX8: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX8: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX8: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV2]], [[UV5]]
- ; GFX8: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV3]], [[UV6]]
- ; GFX8: [[USUBSAT2:%[0-9]+]]:_(s16) = G_USUBSAT [[UV4]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16), [[USUBSAT2]](s16)
; GFX8: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX8: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX8: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC]], [[TRUNC3]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[UMIN]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC1]], [[TRUNC4]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[TRUNC1]], [[UMIN1]]
+ ; GFX8: [[UMIN2:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC2]], [[TRUNC5]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[TRUNC2]], [[UMIN2]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SUB]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SUB1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SUB2]](s16)
+ ; GFX8: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[C1]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<4 x s16>), 0
+ ; GFX8: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX8: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX8: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
; GFX9-LABEL: name: usubsat_v3s16
; GFX9: [[COPY:%[0-9]+]]:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
; GFX9: [[UV:%[0-9]+]]:_(<3 x s16>), [[UV1:%[0-9]+]]:_(<3 x s16>) = G_UNMERGE_VALUES [[COPY]](<6 x s16>)
- ; GFX9: [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16), [[UV4:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV]](<3 x s16>)
- ; GFX9: [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[UV1]](<3 x s16>)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV2]], [[UV5]]
- ; GFX9: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV3]], [[UV6]]
- ; GFX9: [[USUBSAT2:%[0-9]+]]:_(s16) = G_USUBSAT [[UV4]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<3 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16), [[USUBSAT2]](s16)
; GFX9: [[DEF:%[0-9]+]]:_(<4 x s16>) = G_IMPLICIT_DEF
- ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
- ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[BUILD_VECTOR]](<3 x s16>), [[EXTRACT]](<3 x s16>)
- ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS]](<6 x s16>)
+ ; GFX9: [[INSERT:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV]](<3 x s16>), 0
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT]](<4 x s16>)
+ ; GFX9: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX9: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX9: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX9: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX9: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX9: [[COPY2:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY1]](s32), [[COPY2]](s32)
+ ; GFX9: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX9: [[DEF1:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
+ ; GFX9: [[BUILD_VECTOR_TRUNC1:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY3]](s32), [[DEF1]](s32)
+ ; GFX9: [[DEF2:%[0-9]+]]:_(<2 x s16>) = G_IMPLICIT_DEF
+ ; GFX9: [[INSERT1:%[0-9]+]]:_(<4 x s16>) = G_INSERT [[DEF]], [[UV1]](<3 x s16>), 0
+ ; GFX9: [[UV4:%[0-9]+]]:_(<2 x s16>), [[UV5:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[INSERT1]](<4 x s16>)
+ ; GFX9: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV4]](<2 x s16>)
+ ; GFX9: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX9: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV5]](<2 x s16>)
+ ; GFX9: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX9: [[COPY4:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX9: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC2:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY4]](s32), [[COPY5]](s32)
+ ; GFX9: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX9: [[BUILD_VECTOR_TRUNC3:%[0-9]+]]:_(<2 x s16>) = G_BUILD_VECTOR_TRUNC [[COPY6]](s32), [[DEF1]](s32)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[BUILD_VECTOR_TRUNC]], [[BUILD_VECTOR_TRUNC2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC]], [[UMIN]]
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[BUILD_VECTOR_TRUNC1]], [[BUILD_VECTOR_TRUNC3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[BUILD_VECTOR_TRUNC1]], [[UMIN1]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[SUB]](<2 x s16>), [[SUB1]](<2 x s16>), [[DEF2]](<2 x s16>)
+ ; GFX9: [[EXTRACT:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[CONCAT_VECTORS]](<6 x s16>), 0
+ ; GFX9: [[EXTRACT1:%[0-9]+]]:_(<3 x s16>) = G_EXTRACT [[DEF]](<4 x s16>), 0
+ ; GFX9: [[CONCAT_VECTORS1:%[0-9]+]]:_(<6 x s16>) = G_CONCAT_VECTORS [[EXTRACT]](<3 x s16>), [[EXTRACT1]](<3 x s16>)
+ ; GFX9: $vgpr0_vgpr1_vgpr2 = COPY [[CONCAT_VECTORS1]](<6 x s16>)
%0:_(<6 x s16>) = COPY $vgpr0_vgpr1_vgpr2
%1:_(<3 x s16>), %2:_(<3 x s16>) = G_UNMERGE_VALUES %0
%3:_(<3 x s16>) = G_USUBSAT %1, %2
@@ -359,36 +518,115 @@ body: |
; GFX6-LABEL: name: usubsat_v4s16
; GFX6: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX6: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX6: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV]], [[UV4]]
- ; GFX6: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV1]], [[UV5]]
- ; GFX6: [[USUBSAT2:%[0-9]+]]:_(s16) = G_USUBSAT [[UV2]], [[UV6]]
- ; GFX6: [[USUBSAT3:%[0-9]+]]:_(s16) = G_USUBSAT [[UV3]], [[UV7]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16), [[USUBSAT2]](s16), [[USUBSAT3]](s16)
- ; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX6: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX6: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX6: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX6: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX6: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX6: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX6: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX6: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX6: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX6: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX6: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX6: [[COPY2:%[0-9]+]]:_(s32) = COPY [[BITCAST]](s32)
+ ; GFX6: [[COPY3:%[0-9]+]]:_(s32) = COPY [[BITCAST2]](s32)
+ ; GFX6: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY2]], [[C]](s32)
+ ; GFX6: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[COPY3]], [[C]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[SHL]], [[SHL1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[SHL]], [[UMIN]]
+ ; GFX6: [[LSHR4:%[0-9]+]]:_(s32) = G_LSHR [[SUB]], [[C]](s32)
+ ; GFX6: [[COPY4:%[0-9]+]]:_(s32) = COPY [[LSHR]](s32)
+ ; GFX6: [[COPY5:%[0-9]+]]:_(s32) = COPY [[LSHR2]](s32)
+ ; GFX6: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[COPY4]], [[C]](s32)
+ ; GFX6: [[SHL3:%[0-9]+]]:_(s32) = G_SHL [[COPY5]], [[C]](s32)
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[SHL2]], [[SHL3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[SHL2]], [[UMIN1]]
+ ; GFX6: [[LSHR5:%[0-9]+]]:_(s32) = G_LSHR [[SUB1]], [[C]](s32)
+ ; GFX6: [[COPY6:%[0-9]+]]:_(s32) = COPY [[BITCAST1]](s32)
+ ; GFX6: [[COPY7:%[0-9]+]]:_(s32) = COPY [[BITCAST3]](s32)
+ ; GFX6: [[SHL4:%[0-9]+]]:_(s32) = G_SHL [[COPY6]], [[C]](s32)
+ ; GFX6: [[SHL5:%[0-9]+]]:_(s32) = G_SHL [[COPY7]], [[C]](s32)
+ ; GFX6: [[UMIN2:%[0-9]+]]:_(s32) = G_UMIN [[SHL4]], [[SHL5]]
+ ; GFX6: [[SUB2:%[0-9]+]]:_(s32) = G_SUB [[SHL4]], [[UMIN2]]
+ ; GFX6: [[LSHR6:%[0-9]+]]:_(s32) = G_LSHR [[SUB2]], [[C]](s32)
+ ; GFX6: [[COPY8:%[0-9]+]]:_(s32) = COPY [[LSHR1]](s32)
+ ; GFX6: [[COPY9:%[0-9]+]]:_(s32) = COPY [[LSHR3]](s32)
+ ; GFX6: [[SHL6:%[0-9]+]]:_(s32) = G_SHL [[COPY8]], [[C]](s32)
+ ; GFX6: [[SHL7:%[0-9]+]]:_(s32) = G_SHL [[COPY9]], [[C]](s32)
+ ; GFX6: [[UMIN3:%[0-9]+]]:_(s32) = G_UMIN [[SHL6]], [[SHL7]]
+ ; GFX6: [[SUB3:%[0-9]+]]:_(s32) = G_SUB [[SHL6]], [[UMIN3]]
+ ; GFX6: [[LSHR7:%[0-9]+]]:_(s32) = G_LSHR [[SUB3]], [[C]](s32)
+ ; GFX6: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
+ ; GFX6: [[COPY10:%[0-9]+]]:_(s32) = COPY [[LSHR4]](s32)
+ ; GFX6: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY10]], [[C1]]
+ ; GFX6: [[COPY11:%[0-9]+]]:_(s32) = COPY [[LSHR5]](s32)
+ ; GFX6: [[AND1:%[0-9]+]]:_(s32) = G_AND [[COPY11]], [[C1]]
+ ; GFX6: [[SHL8:%[0-9]+]]:_(s32) = G_SHL [[AND1]], [[C]](s32)
+ ; GFX6: [[OR:%[0-9]+]]:_(s32) = G_OR [[AND]], [[SHL8]]
+ ; GFX6: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX6: [[COPY12:%[0-9]+]]:_(s32) = COPY [[LSHR6]](s32)
+ ; GFX6: [[AND2:%[0-9]+]]:_(s32) = G_AND [[COPY12]], [[C1]]
+ ; GFX6: [[COPY13:%[0-9]+]]:_(s32) = COPY [[LSHR7]](s32)
+ ; GFX6: [[AND3:%[0-9]+]]:_(s32) = G_AND [[COPY13]], [[C1]]
+ ; GFX6: [[SHL9:%[0-9]+]]:_(s32) = G_SHL [[AND3]], [[C]](s32)
+ ; GFX6: [[OR1:%[0-9]+]]:_(s32) = G_OR [[AND2]], [[SHL9]]
+ ; GFX6: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX6: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX6: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX8-LABEL: name: usubsat_v4s16
; GFX8: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX8: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX8: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV]], [[UV4]]
- ; GFX8: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV1]], [[UV5]]
- ; GFX8: [[USUBSAT2:%[0-9]+]]:_(s16) = G_USUBSAT [[UV2]], [[UV6]]
- ; GFX8: [[USUBSAT3:%[0-9]+]]:_(s16) = G_USUBSAT [[UV3]], [[UV7]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16), [[USUBSAT2]](s16), [[USUBSAT3]](s16)
- ; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX8: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX8: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[UV]](<2 x s16>)
+ ; GFX8: [[TRUNC:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST]](s32)
+ ; GFX8: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
+ ; GFX8: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
+ ; GFX8: [[TRUNC1:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR]](s32)
+ ; GFX8: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[UV1]](<2 x s16>)
+ ; GFX8: [[TRUNC2:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST1]](s32)
+ ; GFX8: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST1]], [[C]](s32)
+ ; GFX8: [[TRUNC3:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR1]](s32)
+ ; GFX8: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX8: [[BITCAST2:%[0-9]+]]:_(s32) = G_BITCAST [[UV2]](<2 x s16>)
+ ; GFX8: [[TRUNC4:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST2]](s32)
+ ; GFX8: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST2]], [[C]](s32)
+ ; GFX8: [[TRUNC5:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR2]](s32)
+ ; GFX8: [[BITCAST3:%[0-9]+]]:_(s32) = G_BITCAST [[UV3]](<2 x s16>)
+ ; GFX8: [[TRUNC6:%[0-9]+]]:_(s16) = G_TRUNC [[BITCAST3]](s32)
+ ; GFX8: [[LSHR3:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST3]], [[C]](s32)
+ ; GFX8: [[TRUNC7:%[0-9]+]]:_(s16) = G_TRUNC [[LSHR3]](s32)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC]], [[TRUNC4]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s16) = G_SUB [[TRUNC]], [[UMIN]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC1]], [[TRUNC5]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s16) = G_SUB [[TRUNC1]], [[UMIN1]]
+ ; GFX8: [[UMIN2:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC2]], [[TRUNC6]]
+ ; GFX8: [[SUB2:%[0-9]+]]:_(s16) = G_SUB [[TRUNC2]], [[UMIN2]]
+ ; GFX8: [[UMIN3:%[0-9]+]]:_(s16) = G_UMIN [[TRUNC3]], [[TRUNC7]]
+ ; GFX8: [[SUB3:%[0-9]+]]:_(s16) = G_SUB [[TRUNC3]], [[UMIN3]]
+ ; GFX8: [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[SUB]](s16)
+ ; GFX8: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[SUB1]](s16)
+ ; GFX8: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[ZEXT1]], [[C]](s32)
+ ; GFX8: [[OR:%[0-9]+]]:_(s32) = G_OR [[ZEXT]], [[SHL]]
+ ; GFX8: [[BITCAST4:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR]](s32)
+ ; GFX8: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[SUB2]](s16)
+ ; GFX8: [[ZEXT3:%[0-9]+]]:_(s32) = G_ZEXT [[SUB3]](s16)
+ ; GFX8: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ZEXT3]], [[C]](s32)
+ ; GFX8: [[OR1:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[SHL1]]
+ ; GFX8: [[BITCAST5:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[OR1]](s32)
+ ; GFX8: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[BITCAST4]](<2 x s16>), [[BITCAST5]](<2 x s16>)
+ ; GFX8: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
; GFX9-LABEL: name: usubsat_v4s16
; GFX9: [[COPY:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<4 x s16>) = COPY $vgpr2_vgpr3
- ; GFX9: [[UV:%[0-9]+]]:_(s16), [[UV1:%[0-9]+]]:_(s16), [[UV2:%[0-9]+]]:_(s16), [[UV3:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
- ; GFX9: [[UV4:%[0-9]+]]:_(s16), [[UV5:%[0-9]+]]:_(s16), [[UV6:%[0-9]+]]:_(s16), [[UV7:%[0-9]+]]:_(s16) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s16) = G_USUBSAT [[UV]], [[UV4]]
- ; GFX9: [[USUBSAT1:%[0-9]+]]:_(s16) = G_USUBSAT [[UV1]], [[UV5]]
- ; GFX9: [[USUBSAT2:%[0-9]+]]:_(s16) = G_USUBSAT [[UV2]], [[UV6]]
- ; GFX9: [[USUBSAT3:%[0-9]+]]:_(s16) = G_USUBSAT [[UV3]], [[UV7]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<4 x s16>) = G_BUILD_VECTOR [[USUBSAT]](s16), [[USUBSAT1]](s16), [[USUBSAT2]](s16), [[USUBSAT3]](s16)
- ; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<4 x s16>)
+ ; GFX9: [[UV:%[0-9]+]]:_(<2 x s16>), [[UV1:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY]](<4 x s16>)
+ ; GFX9: [[UV2:%[0-9]+]]:_(<2 x s16>), [[UV3:%[0-9]+]]:_(<2 x s16>) = G_UNMERGE_VALUES [[COPY1]](<4 x s16>)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[UV]], [[UV2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(<2 x s16>) = G_SUB [[UV]], [[UMIN]]
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(<2 x s16>) = G_UMIN [[UV1]], [[UV3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(<2 x s16>) = G_SUB [[UV1]], [[UMIN1]]
+ ; GFX9: [[CONCAT_VECTORS:%[0-9]+]]:_(<4 x s16>) = G_CONCAT_VECTORS [[SUB]](<2 x s16>), [[SUB1]](<2 x s16>)
+ ; GFX9: $vgpr0_vgpr1 = COPY [[CONCAT_VECTORS]](<4 x s16>)
%0:_(<4 x s16>) = COPY $vgpr0_vgpr1
%1:_(<4 x s16>) = COPY $vgpr2_vgpr3
%2:_(<4 x s16>) = G_USUBSAT %0, %1
@@ -404,18 +642,21 @@ body: |
; GFX6-LABEL: name: usubsat_s32
; GFX6: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX6: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s32) = G_USUBSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0 = COPY [[USUBSAT]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[COPY]], [[COPY1]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[UMIN]]
+ ; GFX6: $vgpr0 = COPY [[SUB]](s32)
; GFX8-LABEL: name: usubsat_s32
; GFX8: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX8: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s32) = G_USUBSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0 = COPY [[USUBSAT]](s32)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[COPY]], [[COPY1]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[UMIN]]
+ ; GFX8: $vgpr0 = COPY [[SUB]](s32)
; GFX9-LABEL: name: usubsat_s32
; GFX9: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; GFX9: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s32) = G_USUBSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0 = COPY [[USUBSAT]](s32)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[COPY]], [[COPY1]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[COPY]], [[UMIN]]
+ ; GFX9: $vgpr0 = COPY [[SUB]](s32)
%0:_(s32) = COPY $vgpr0
%1:_(s32) = COPY $vgpr1
%2:_(s32) = G_USUBSAT %0, %1
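All three subtargets lower the scalar s32 case to the same two instructions, because an s32 G_UMIN is legal everywhere and the min/max expansion applies: clamping the subtrahend to the minuend first makes the subtraction unable to wrap. A minimal standalone C++ sketch of the identity being exercised (illustrative, not the LegalizerHelper code):

#include <algorithm>
#include <cstdint>

uint32_t usubsat32(uint32_t a, uint32_t b) {
  // umin clamps the subtrahend so the subtraction cannot wrap:
  // if b >= a, the result is a - a == 0, the saturated value.
  return a - std::min(a, b);
}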
@@ -433,27 +674,33 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s32) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX6: [[USUBSAT1:%[0-9]+]]:_(s32) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[USUBSAT]](s32), [[USUBSAT1]](s32)
+ ; GFX6: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[UV]], [[UV2]]
+ ; GFX6: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[UMIN]]
+ ; GFX6: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[UV1]], [[UV3]]
+ ; GFX6: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV1]], [[UMIN1]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SUB]](s32), [[SUB1]](s32)
; GFX6: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX8-LABEL: name: usubsat_v2s32
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s32) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX8: [[USUBSAT1:%[0-9]+]]:_(s32) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[USUBSAT]](s32), [[USUBSAT1]](s32)
+ ; GFX8: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[UV]], [[UV2]]
+ ; GFX8: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[UMIN]]
+ ; GFX8: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[UV1]], [[UV3]]
+ ; GFX8: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV1]], [[UMIN1]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SUB]](s32), [[SUB1]](s32)
; GFX8: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
; GFX9-LABEL: name: usubsat_v2s32
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s32>) = COPY $vgpr2_vgpr3
; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](<2 x s32>)
; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](<2 x s32>)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s32) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX9: [[USUBSAT1:%[0-9]+]]:_(s32) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[USUBSAT]](s32), [[USUBSAT1]](s32)
+ ; GFX9: [[UMIN:%[0-9]+]]:_(s32) = G_UMIN [[UV]], [[UV2]]
+ ; GFX9: [[SUB:%[0-9]+]]:_(s32) = G_SUB [[UV]], [[UMIN]]
+ ; GFX9: [[UMIN1:%[0-9]+]]:_(s32) = G_UMIN [[UV1]], [[UV3]]
+ ; GFX9: [[SUB1:%[0-9]+]]:_(s32) = G_SUB [[UV1]], [[UMIN1]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s32>) = G_BUILD_VECTOR [[SUB]](s32), [[SUB1]](s32)
; GFX9: $vgpr0_vgpr1 = COPY [[BUILD_VECTOR]](<2 x s32>)
%0:_(<2 x s32>) = COPY $vgpr0_vgpr1
%1:_(<2 x s32>) = COPY $vgpr2_vgpr3
@@ -470,18 +717,39 @@ body: |
; GFX6-LABEL: name: usubsat_s64
; GFX6: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX6: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s64) = G_USUBSAT [[COPY]], [[COPY1]]
- ; GFX6: $vgpr0_vgpr1 = COPY [[USUBSAT]](s64)
+ ; GFX6: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX6: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX6: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
+ ; GFX6: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX6: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX8-LABEL: name: usubsat_s64
; GFX8: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX8: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s64) = G_USUBSAT [[COPY]], [[COPY1]]
- ; GFX8: $vgpr0_vgpr1 = COPY [[USUBSAT]](s64)
+ ; GFX8: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX8: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX8: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
+ ; GFX8: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX8: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
; GFX9-LABEL: name: usubsat_s64
; GFX9: [[COPY:%[0-9]+]]:_(s64) = COPY $vgpr0_vgpr1
; GFX9: [[COPY1:%[0-9]+]]:_(s64) = COPY $vgpr2_vgpr3
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s64) = G_USUBSAT [[COPY]], [[COPY1]]
- ; GFX9: $vgpr0_vgpr1 = COPY [[USUBSAT]](s64)
+ ; GFX9: [[UV:%[0-9]+]]:_(s32), [[UV1:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY]](s64)
+ ; GFX9: [[UV2:%[0-9]+]]:_(s32), [[UV3:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[COPY1]](s64)
+ ; GFX9: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV]], [[UV2]]
+ ; GFX9: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV1]], [[UV3]], [[USUBO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY]](s64), [[COPY1]]
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX9: $vgpr0_vgpr1 = COPY [[SELECT]](s64)
%0:_(s64) = COPY $vgpr0_vgpr1
%1:_(s64) = COPY $vgpr2_vgpr3
%2:_(s64) = G_USUBSAT %0, %1
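At s64 there is no legal 64-bit min, so the other new lowering kicks in: the raw difference is computed with a G_USUBO/G_USUBE chain over the 32-bit halves, and the saturated constant is selected when the subtraction would have borrowed (re-derived here as an unsigned compare of the original operands). The same identity as standalone C++ (a sketch, not the LegalizerHelper code):

#include <cstdint>

uint64_t usubsat64(uint64_t a, uint64_t b) {
  uint64_t raw = a - b;    // what the G_USUBO/G_USUBE pair produces
  return a < b ? 0 : raw;  // G_ICMP ult + G_SELECT of the constant 0
}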
@@ -499,27 +767,66 @@ body: |
; GFX6: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX6: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX6: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX6: [[USUBSAT:%[0-9]+]]:_(s64) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX6: [[USUBSAT1:%[0-9]+]]:_(s64) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[USUBSAT]](s64), [[USUBSAT1]](s64)
+ ; GFX6: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX6: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX6: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
+ ; GFX6: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO1]]
+ ; GFX6: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX6: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV]](s64), [[UV2]]
+ ; GFX6: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX6: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX6: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX6: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX6: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV8]], [[UV10]]
+ ; GFX6: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV9]], [[UV11]], [[USUBO3]]
+ ; GFX6: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
+ ; GFX6: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV1]](s64), [[UV3]]
+ ; GFX6: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[MV1]]
+ ; GFX6: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX6: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX8-LABEL: name: usubsat_v2s64
; GFX8: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX8: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX8: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX8: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX8: [[USUBSAT:%[0-9]+]]:_(s64) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX8: [[USUBSAT1:%[0-9]+]]:_(s64) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[USUBSAT]](s64), [[USUBSAT1]](s64)
+ ; GFX8: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX8: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX8: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
+ ; GFX8: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO1]]
+ ; GFX8: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX8: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV]](s64), [[UV2]]
+ ; GFX8: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX8: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX8: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX8: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX8: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV8]], [[UV10]]
+ ; GFX8: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV9]], [[UV11]], [[USUBO3]]
+ ; GFX8: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
+ ; GFX8: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV1]](s64), [[UV3]]
+ ; GFX8: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[MV1]]
+ ; GFX8: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX8: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
; GFX9-LABEL: name: usubsat_v2s64
; GFX9: [[COPY:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
; GFX9: [[COPY1:%[0-9]+]]:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
; GFX9: [[UV:%[0-9]+]]:_(s64), [[UV1:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY]](<2 x s64>)
; GFX9: [[UV2:%[0-9]+]]:_(s64), [[UV3:%[0-9]+]]:_(s64) = G_UNMERGE_VALUES [[COPY1]](<2 x s64>)
- ; GFX9: [[USUBSAT:%[0-9]+]]:_(s64) = G_USUBSAT [[UV]], [[UV2]]
- ; GFX9: [[USUBSAT1:%[0-9]+]]:_(s64) = G_USUBSAT [[UV1]], [[UV3]]
- ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[USUBSAT]](s64), [[USUBSAT1]](s64)
+ ; GFX9: [[UV4:%[0-9]+]]:_(s32), [[UV5:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV]](s64)
+ ; GFX9: [[UV6:%[0-9]+]]:_(s32), [[UV7:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV2]](s64)
+ ; GFX9: [[USUBO:%[0-9]+]]:_(s32), [[USUBO1:%[0-9]+]]:_(s1) = G_USUBO [[UV4]], [[UV6]]
+ ; GFX9: [[USUBE:%[0-9]+]]:_(s32), [[USUBE1:%[0-9]+]]:_(s1) = G_USUBE [[UV5]], [[UV7]], [[USUBO1]]
+ ; GFX9: [[MV:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO]](s32), [[USUBE]](s32)
+ ; GFX9: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV]](s64), [[UV2]]
+ ; GFX9: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
+ ; GFX9: [[SELECT:%[0-9]+]]:_(s64) = G_SELECT [[ICMP]](s1), [[C]], [[MV]]
+ ; GFX9: [[UV8:%[0-9]+]]:_(s32), [[UV9:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV1]](s64)
+ ; GFX9: [[UV10:%[0-9]+]]:_(s32), [[UV11:%[0-9]+]]:_(s32) = G_UNMERGE_VALUES [[UV3]](s64)
+ ; GFX9: [[USUBO2:%[0-9]+]]:_(s32), [[USUBO3:%[0-9]+]]:_(s1) = G_USUBO [[UV8]], [[UV10]]
+ ; GFX9: [[USUBE2:%[0-9]+]]:_(s32), [[USUBE3:%[0-9]+]]:_(s1) = G_USUBE [[UV9]], [[UV11]], [[USUBO3]]
+ ; GFX9: [[MV1:%[0-9]+]]:_(s64) = G_MERGE_VALUES [[USUBO2]](s32), [[USUBE2]](s32)
+ ; GFX9: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[UV1]](s64), [[UV3]]
+ ; GFX9: [[SELECT1:%[0-9]+]]:_(s64) = G_SELECT [[ICMP1]](s1), [[C]], [[MV1]]
+ ; GFX9: [[BUILD_VECTOR:%[0-9]+]]:_(<2 x s64>) = G_BUILD_VECTOR [[SELECT]](s64), [[SELECT1]](s64)
; GFX9: $vgpr0_vgpr1_vgpr2_vgpr3 = COPY [[BUILD_VECTOR]](<2 x s64>)
%0:_(<2 x s64>) = COPY $vgpr0_vgpr1_vgpr2_vgpr3
%1:_(<2 x s64>) = COPY $vgpr4_vgpr5_vgpr6_vgpr7
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
new file mode 100644
index 000000000000..80ff9bb2b575
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/saddsat.ll
@@ -0,0 +1,9901 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -o - %s | FileCheck -check-prefix=GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -o - %s | FileCheck -check-prefix=GFX10 %s
+
+define i7 @v_saddsat_i7(i7 %lhs, i7 %rhs) {
+; GFX6-LABEL: v_saddsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 25, v0
+; GFX6-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 25, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 25, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v0
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX8-NEXT: v_sub_u16_e32 v3, 0x8000, v3
+; GFX8-NEXT: v_sub_u16_e32 v2, 0x7fff, v2
+; GFX8-NEXT: v_max_i16_e32 v1, v3, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_ashrrev_i16_e32 v0, 9, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX9-NEXT: v_min_i16_e32 v3, 0, v0
+; GFX9-NEXT: v_max_i16_e32 v2, 0, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX9-NEXT: v_sub_u16_e32 v3, 0x8000, v3
+; GFX9-NEXT: v_sub_u16_e32 v2, 0x7fff, v2
+; GFX9-NEXT: v_max_i16_e32 v1, v3, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_ashrrev_i16_e32 v0, 9, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 9, v0
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 9, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i16_e64 v2, v0, 0
+; GFX10-NEXT: v_max_i16_e64 v3, v0, 0
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, 0x8000, v2
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, 0x7fff, v3
+; GFX10-NEXT: v_max_i16_e64 v1, v2, v1
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_ashrrev_i16_e64 v0, 9, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i7 @llvm.sadd.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
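The sequences above are the signed min/max lowering at work: because the wide type's signed min/max are legal, the rhs is clamped into the range that keeps the add in bounds, hi = INT_MAX - max(lhs, 0) and lo = INT_MIN - min(lhs, 0), and only then added. A standalone C++ sketch of that identity (illustrative, not the generated code):

#include <algorithm>
#include <cstdint>
#include <limits>

int32_t saddsat32(int32_t a, int32_t b) {
  // Bounds on the addend such that a + b can neither overflow nor
  // underflow; this mirrors the v_max/v_min/v_sub pairs above.
  int32_t hi = std::numeric_limits<int32_t>::max() - std::max(a, 0);
  int32_t lo = std::numeric_limits<int32_t>::min() - std::min(a, 0);
  // With b clamped into [lo, hi] the final add cannot wrap.
  return a + std::min(std::max(b, lo), hi);
}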
+
+define amdgpu_ps i7 @s_saddsat_i7(i7 inreg %lhs, i7 inreg %rhs) {
+; GFX6-LABEL: s_saddsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 25
+; GFX6-NEXT: s_lshl_b32 s1, s1, 25
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s3, s0, 0
+; GFX6-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX6-NEXT: s_cmp_gt_i32 s3, s1
+; GFX6-NEXT: s_cselect_b32 s1, s3, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s1, s1, s2
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 25
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s0
+; GFX8-NEXT: s_sext_i32_i16 s4, 0
+; GFX8-NEXT: s_cmp_gt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s5, s3, s4
+; GFX8-NEXT: s_sub_i32 s5, 0x7fff, s5
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_sub_i32 s3, 0x8000, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s3, s5
+; GFX8-NEXT: s_cmp_lt_i32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_ashr_i32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s3, s0
+; GFX9-NEXT: s_sext_i32_i16 s4, 0
+; GFX9-NEXT: s_cmp_gt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s5, s3, s4
+; GFX9-NEXT: s_sub_i32 s5, 0x7fff, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_sub_i32 s3, 0x8000, s3
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s3, s5
+; GFX9-NEXT: s_cmp_lt_i32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s1, s1, s3
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_ashr_i32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX10-NEXT: s_sext_i32_i16 s4, 0
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s5, s3, s4
+; GFX10-NEXT: s_sub_i32 s5, 0x7fff, s5
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_sub_i32 s3, 0x8000, s3
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_sext_i32_i16 s3, s5
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s3
+; GFX10-NEXT: s_cselect_b32 s1, s1, s3
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_ashr_i32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i7 @llvm.sadd.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
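Both the VALU and SALU forms also show how the illegal i7 is handled: the operands are shifted into the most significant bits of a legal type (by 25 for the 32-bit lowering on GFX6, by 9 for the 16-bit lowering elsewhere), saturated there, and arithmetic-shifted back, so the wide clamp values land exactly on the i7 extremes. A C++ sketch of the 32-bit variant, reusing the saddsat32 helper sketched above and assuming the usual two's-complement conversions and shifts:

#include <cstdint>

int8_t saddsat_i7(int8_t lhs, int8_t rhs) { // i7 values held in int8_t
  // Shift into the top of 32 bits; unsigned math keeps the left
  // shift well defined for negative inputs.
  int32_t a = int32_t(uint32_t(lhs) << 25);
  int32_t b = int32_t(uint32_t(rhs) << 25);
  // Saturate at 32 bits, then sign-extend back down with an
  // arithmetic right shift, as in the final v_ashrrev_i32 above.
  return int8_t(saddsat32(a, b) >> 25);
}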
+
+define i8 @v_saddsat_i8(i8 %lhs, i8 %rhs) {
+; GFX6-LABEL: v_saddsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 24, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v0
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_sub_u16_e32 v3, 0x8000, v3
+; GFX8-NEXT: v_sub_u16_e32 v2, 0x7fff, v2
+; GFX8-NEXT: v_max_i16_e32 v1, v3, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_ashrrev_i16_e32 v0, 8, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: v_min_i16_e32 v3, 0, v0
+; GFX9-NEXT: v_max_i16_e32 v2, 0, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_sub_u16_e32 v3, 0x8000, v3
+; GFX9-NEXT: v_sub_u16_e32 v2, 0x7fff, v2
+; GFX9-NEXT: v_max_i16_e32 v1, v3, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_ashrrev_i16_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i16_e64 v2, v0, 0
+; GFX10-NEXT: v_max_i16_e64 v3, v0, 0
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, 0x8000, v2
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, 0x7fff, v3
+; GFX10-NEXT: v_max_i16_e64 v1, v2, v1
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_ashrrev_i16_e64 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i8 @llvm.sadd.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define amdgpu_ps i8 @s_saddsat_i8(i8 inreg %lhs, i8 inreg %rhs) {
+; GFX6-LABEL: s_saddsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s3, s0, 0
+; GFX6-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX6-NEXT: s_cmp_gt_i32 s3, s1
+; GFX6-NEXT: s_cselect_b32 s1, s3, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s1, s1, s2
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 24
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s0
+; GFX8-NEXT: s_sext_i32_i16 s4, 0
+; GFX8-NEXT: s_cmp_gt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s5, s3, s4
+; GFX8-NEXT: s_sub_i32 s5, 0x7fff, s5
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_sub_i32 s3, 0x8000, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s3, s5
+; GFX8-NEXT: s_cmp_lt_i32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_ashr_i32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s3, s0
+; GFX9-NEXT: s_sext_i32_i16 s4, 0
+; GFX9-NEXT: s_cmp_gt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s5, s3, s4
+; GFX9-NEXT: s_sub_i32 s5, 0x7fff, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_sub_i32 s3, 0x8000, s3
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s3, s5
+; GFX9-NEXT: s_cmp_lt_i32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s1, s1, s3
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_ashr_i32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: s_sext_i32_i16 s4, 0
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s5, s3, s4
+; GFX10-NEXT: s_sub_i32 s5, 0x7fff, s5
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_sub_i32 s3, 0x8000, s3
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_sext_i32_i16 s3, s5
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s3
+; GFX10-NEXT: s_cselect_b32 s1, s1, s3
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_ashr_i32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i8 @llvm.sadd.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define i16 @v_saddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
+; GFX6-LABEL: v_saddsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v5, 0, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v4, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: v_max_i32_e32 v1, v5, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v4
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_min_i32_e32 v4, 0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_max_i32_e32 v3, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_max_i32_e32 v2, v4, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v3
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 24, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v5, 0, v0
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_sub_u16_e32 v5, s5, v5
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v4, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v4, s4, v4
+; GFX8-NEXT: v_max_i16_e32 v1, v5, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v4
+; GFX8-NEXT: v_min_i16_e32 v4, 0, v3
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_max_i16_e32 v1, 0, v3
+; GFX8-NEXT: v_sub_u16_e32 v4, s5, v4
+; GFX8-NEXT: v_sub_u16_e32 v1, s4, v1
+; GFX8-NEXT: v_max_i16_e32 v2, v4, v2
+; GFX8-NEXT: v_min_i16_e32 v1, v2, v1
+; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX8-NEXT: v_add_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_mov_b32 s5, 0x8000
+; GFX9-NEXT: v_min_i16_e32 v5, 0, v0
+; GFX9-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_sub_u16_e32 v5, s5, v5
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_i16_e32 v4, 0, v0
+; GFX9-NEXT: v_sub_u16_e32 v4, s4, v4
+; GFX9-NEXT: v_max_i16_e32 v1, v5, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v4
+; GFX9-NEXT: v_min_i16_e32 v4, 0, v2
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_max_i16_e32 v1, 0, v2
+; GFX9-NEXT: v_sub_u16_e32 v4, s5, v4
+; GFX9-NEXT: v_sub_u16_e32 v1, s4, v1
+; GFX9-NEXT: v_max_i16_e32 v3, v4, v3
+; GFX9-NEXT: v_min_i16_e32 v1, v3, v1
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_add_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_and_b32_sdwa v0, sext(v0), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v1, sext(v1), s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: v_lshlrev_b16_e64 v2, 8, v0
+; GFX10-NEXT: v_lshrrev_b32_sdwa v0, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: s_mov_b32 s5, 0x8000
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: v_min_i16_e64 v4, v2, 0
+; GFX10-NEXT: v_min_i16_e64 v5, v0, 0
+; GFX10-NEXT: v_max_i16_e64 v6, v2, 0
+; GFX10-NEXT: v_max_i16_e64 v7, v0, 0
+; GFX10-NEXT: s_movk_i32 s4, 0x7fff
+; GFX10-NEXT: v_sub_nc_u16_e64 v4, s5, v4
+; GFX10-NEXT: v_sub_nc_u16_e64 v5, s5, v5
+; GFX10-NEXT: v_sub_nc_u16_e64 v6, s4, v6
+; GFX10-NEXT: v_sub_nc_u16_e64 v7, s4, v7
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: v_max_i16_e64 v1, v4, v1
+; GFX10-NEXT: v_max_i16_e64 v10, v5, v3
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v6
+; GFX10-NEXT: v_min_i16_e64 v3, v10, v7
+; GFX10-NEXT: v_add_nc_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v3
+; GFX10-NEXT: v_and_b32_sdwa v1, sext(v1), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v0, sext(v0), s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
+
+define amdgpu_ps i16 @s_saddsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
+; GFX6-LABEL: s_saddsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshr_b32 s3, s1, 8
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: s_cselect_b32 s6, s0, 0
+; GFX6-NEXT: s_sub_i32 s6, s4, s6
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s7, s0, 0
+; GFX6-NEXT: s_sub_i32 s7, s5, s7
+; GFX6-NEXT: s_cmp_gt_i32 s7, s1
+; GFX6-NEXT: s_cselect_b32 s1, s7, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s6
+; GFX6-NEXT: s_cselect_b32 s1, s1, s6
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_ashr_i32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s3, s1, 0
+; GFX6-NEXT: s_sub_i32 s3, s4, s3
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s4, s1, 0
+; GFX6-NEXT: s_sub_i32 s4, s5, s4
+; GFX6-NEXT: s_cmp_gt_i32 s4, s2
+; GFX6-NEXT: s_cselect_b32 s2, s4, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s3
+; GFX6-NEXT: s_cselect_b32 s2, s2, s3
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: s_movk_i32 s2, 0xff
+; GFX6-NEXT: s_ashr_i32 s1, s1, 24
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshl_b32 s0, s0, s4
+; GFX8-NEXT: s_lshr_b32 s3, s1, 8
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_sext_i32_i16 s7, s0
+; GFX8-NEXT: s_sext_i32_i16 s8, 0
+; GFX8-NEXT: s_cmp_gt_i32 s7, s8
+; GFX8-NEXT: s_movk_i32 s5, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s9, s7, s8
+; GFX8-NEXT: s_sub_i32 s9, s5, s9
+; GFX8-NEXT: s_cmp_lt_i32 s7, s8
+; GFX8-NEXT: s_mov_b32 s6, 0x8000
+; GFX8-NEXT: s_cselect_b32 s7, s7, s8
+; GFX8-NEXT: s_sub_i32 s7, s6, s7
+; GFX8-NEXT: s_sext_i32_i16 s7, s7
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s7, s1
+; GFX8-NEXT: s_cselect_b32 s1, s7, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s7, s9
+; GFX8-NEXT: s_cmp_lt_i32 s1, s7
+; GFX8-NEXT: s_cselect_b32 s1, s1, s7
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_lshl_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s2, s3, s4
+; GFX8-NEXT: s_ashr_i32 s0, s0, s4
+; GFX8-NEXT: s_sext_i32_i16 s3, s1
+; GFX8-NEXT: s_cmp_gt_i32 s3, s8
+; GFX8-NEXT: s_cselect_b32 s7, s3, s8
+; GFX8-NEXT: s_sub_i32 s5, s5, s7
+; GFX8-NEXT: s_cmp_lt_i32 s3, s8
+; GFX8-NEXT: s_cselect_b32 s3, s3, s8
+; GFX8-NEXT: s_sub_i32 s3, s6, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_gt_i32 s3, s2
+; GFX8-NEXT: s_cselect_b32 s2, s3, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s5
+; GFX8-NEXT: s_cmp_lt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_movk_i32 s2, 0xff
+; GFX8-NEXT: s_ashr_i32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshl_b32 s0, s0, s4
+; GFX9-NEXT: s_lshr_b32 s3, s1, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_sext_i32_i16 s7, s0
+; GFX9-NEXT: s_sext_i32_i16 s8, 0
+; GFX9-NEXT: s_cmp_gt_i32 s7, s8
+; GFX9-NEXT: s_movk_i32 s5, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s9, s7, s8
+; GFX9-NEXT: s_sub_i32 s9, s5, s9
+; GFX9-NEXT: s_cmp_lt_i32 s7, s8
+; GFX9-NEXT: s_mov_b32 s6, 0x8000
+; GFX9-NEXT: s_cselect_b32 s7, s7, s8
+; GFX9-NEXT: s_sub_i32 s7, s6, s7
+; GFX9-NEXT: s_sext_i32_i16 s7, s7
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s7, s1
+; GFX9-NEXT: s_cselect_b32 s1, s7, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s7, s9
+; GFX9-NEXT: s_cmp_lt_i32 s1, s7
+; GFX9-NEXT: s_cselect_b32 s1, s1, s7
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_lshl_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s2, s3, s4
+; GFX9-NEXT: s_ashr_i32 s0, s0, s4
+; GFX9-NEXT: s_sext_i32_i16 s3, s1
+; GFX9-NEXT: s_cmp_gt_i32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s7, s3, s8
+; GFX9-NEXT: s_sub_i32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_i32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s3, s3, s8
+; GFX9-NEXT: s_sub_i32 s3, s6, s3
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_cmp_gt_i32 s3, s2
+; GFX9-NEXT: s_cselect_b32 s2, s3, s2
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_sext_i32_i16 s3, s5
+; GFX9-NEXT: s_cmp_lt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_movk_i32 s2, 0xff
+; GFX9-NEXT: s_ashr_i32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s1, s1, s2
+; GFX9-NEXT: s_and_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s3, s0, 8
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_sext_i32_i16 s6, 0
+; GFX10-NEXT: s_sext_i32_i16 s5, s0
+; GFX10-NEXT: s_lshr_b32 s4, s1, 8
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_cmp_gt_i32 s5, s6
+; GFX10-NEXT: s_movk_i32 s7, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s8, s5, s6
+; GFX10-NEXT: s_mov_b32 s9, 0x8000
+; GFX10-NEXT: s_sub_i32 s8, s7, s8
+; GFX10-NEXT: s_cmp_lt_i32 s5, s6
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_sub_i32 s5, s9, s5
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cmp_gt_i32 s5, s1
+; GFX10-NEXT: s_cselect_b32 s1, s5, s1
+; GFX10-NEXT: s_sext_i32_i16 s5, s8
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s5
+; GFX10-NEXT: s_cselect_b32 s1, s1, s5
+; GFX10-NEXT: s_lshl_b32 s3, s3, s2
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s4, s2
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_sext_i32_i16 s4, s3
+; GFX10-NEXT: s_ashr_i32 s0, s0, s2
+; GFX10-NEXT: s_cmp_gt_i32 s4, s6
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cselect_b32 s5, s4, s6
+; GFX10-NEXT: s_sub_i32 s5, s7, s5
+; GFX10-NEXT: s_cmp_lt_i32 s4, s6
+; GFX10-NEXT: s_cselect_b32 s4, s4, s6
+; GFX10-NEXT: s_sub_i32 s4, s9, s4
+; GFX10-NEXT: s_sext_i32_i16 s4, s4
+; GFX10-NEXT: s_cmp_gt_i32 s4, s1
+; GFX10-NEXT: s_cselect_b32 s1, s4, s1
+; GFX10-NEXT: s_sext_i32_i16 s4, s5
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s4
+; GFX10-NEXT: s_cselect_b32 s1, s1, s4
+; GFX10-NEXT: s_add_i32 s3, s3, s1
+; GFX10-NEXT: s_sext_i32_i16 s1, s3
+; GFX10-NEXT: s_movk_i32 s3, 0xff
+; GFX10-NEXT: s_ashr_i32 s1, s1, s2
+; GFX10-NEXT: s_and_b32 s0, s0, s3
+; GFX10-NEXT: s_and_b32 s1, s1, s3
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
+
+define i32 @v_saddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
+; GFX6-LABEL: v_saddsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v8, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s4, v8
+; GFX6-NEXT: v_max_i32_e32 v1, v10, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v8
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_min_i32_e32 v8, 0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v5
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_max_i32_e32 v2, v8, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v5
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v6
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_bfrev_b32_e32 v9, -2
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v9, v5
+; GFX6-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX6-NEXT: v_min_i32_e32 v3, v3, v5
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v4
+; GFX6-NEXT: v_mov_b32_e32 v11, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v3
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 24, v1
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 24, v7
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v11, v6
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v9, v5
+; GFX6-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 24, v2
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 24, v3
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v10, 0, v0
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_sub_u16_e32 v10, s5, v10
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v8, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v8, s4, v8
+; GFX8-NEXT: v_max_i16_e32 v1, v10, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v8
+; GFX8-NEXT: v_min_i16_e32 v8, 0, v3
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_max_i16_e32 v1, 0, v3
+; GFX8-NEXT: v_sub_u16_e32 v8, s5, v8
+; GFX8-NEXT: v_sub_u16_e32 v1, s4, v1
+; GFX8-NEXT: v_max_i16_e32 v2, v8, v2
+; GFX8-NEXT: v_min_i16_e32 v1, v2, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v4
+; GFX8-NEXT: v_add_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX8-NEXT: v_min_i16_e32 v6, 0, v2
+; GFX8-NEXT: v_sub_u16_e32 v6, s5, v6
+; GFX8-NEXT: v_mov_b32_e32 v9, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v4, 0, v2
+; GFX8-NEXT: v_sub_u16_e32 v4, v9, v4
+; GFX8-NEXT: v_max_i16_e32 v3, v6, v3
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v4
+; GFX8-NEXT: v_add_u16_e32 v2, v2, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v5
+; GFX8-NEXT: v_min_i16_e32 v6, 0, v3
+; GFX8-NEXT: v_max_i16_e32 v5, 0, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_sub_u16_e32 v6, 0x8000, v6
+; GFX8-NEXT: v_sub_u16_e32 v5, v9, v5
+; GFX8-NEXT: v_max_i16_e32 v4, v6, v4
+; GFX8-NEXT: v_min_i16_e32 v4, v4, v5
+; GFX8-NEXT: v_add_u16_e32 v3, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_mov_b32 s5, 0x8000
+; GFX9-NEXT: v_min_i16_e32 v10, 0, v0
+; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_sub_u16_e32 v10, s5, v10
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_i16_e32 v8, 0, v0
+; GFX9-NEXT: v_sub_u16_e32 v8, s4, v8
+; GFX9-NEXT: v_max_i16_e32 v1, v10, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v8
+; GFX9-NEXT: v_min_i16_e32 v8, 0, v2
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_max_i16_e32 v1, 0, v2
+; GFX9-NEXT: v_sub_u16_e32 v8, s5, v8
+; GFX9-NEXT: v_sub_u16_e32 v1, s4, v1
+; GFX9-NEXT: v_max_i16_e32 v5, v8, v5
+; GFX9-NEXT: v_min_i16_e32 v1, v5, v1
+; GFX9-NEXT: v_add_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX9-NEXT: v_min_i16_e32 v6, 0, v2
+; GFX9-NEXT: v_sub_u16_e32 v6, s5, v6
+; GFX9-NEXT: v_mov_b32_e32 v9, 0x7fff
+; GFX9-NEXT: v_max_i16_e32 v5, 0, v2
+; GFX9-NEXT: v_sub_u16_e32 v5, v9, v5
+; GFX9-NEXT: v_max_i16_e32 v3, v6, v3
+; GFX9-NEXT: v_min_i16_e32 v3, v3, v5
+; GFX9-NEXT: v_add_u16_e32 v2, v2, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v4
+; GFX9-NEXT: v_min_i16_e32 v6, 0, v3
+; GFX9-NEXT: v_max_i16_e32 v5, 0, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX9-NEXT: v_sub_u16_e32 v6, 0x8000, v6
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_sub_u16_e32 v5, v9, v5
+; GFX9-NEXT: v_max_i16_e32 v4, v6, v4
+; GFX9-NEXT: v_and_b32_sdwa v1, sext(v1), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_i16_e32 v4, v4, v5
+; GFX9-NEXT: v_ashrrev_i16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_add_u16_e32 v3, v3, v4
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, sext(v2), s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v2, sext(v3), s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v4, 8, v0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: s_mov_b32 s5, 16
+; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, s5, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_i16_e64 v8, v4, 0
+; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: s_mov_b32 s5, 0x8000
+; GFX10-NEXT: v_min_i16_e64 v9, v2, 0
+; GFX10-NEXT: v_lshlrev_b16_e64 v7, 8, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v8, s5, v8
+; GFX10-NEXT: v_max_i16_e64 v10, v4, 0
+; GFX10-NEXT: s_mov_b32 s6, 24
+; GFX10-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: s_movk_i32 s4, 0x7fff
+; GFX10-NEXT: v_sub_nc_u16_e64 v15, s5, v9
+; GFX10-NEXT: v_lshrrev_b32_sdwa v0, s6, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_max_i16_e64 v11, v2, 0
+; GFX10-NEXT: v_max_i16_e64 v7, v8, v7
+; GFX10-NEXT: v_sub_nc_u16_e64 v10, s4, v10
+; GFX10-NEXT: v_max_i16_e64 v5, v15, v5
+; GFX10-NEXT: v_lshrrev_b32_sdwa v1, s6, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_sub_nc_u16_e64 v8, s4, v11
+; GFX10-NEXT: v_min_i16_e64 v11, v3, 0
+; GFX10-NEXT: v_min_i16_e64 v7, v7, v10
+; GFX10-NEXT: v_min_i16_e64 v10, v0, 0
+; GFX10-NEXT: v_mov_b32_e32 v9, 0x7fff
+; GFX10-NEXT: v_min_i16_e64 v5, v5, v8
+; GFX10-NEXT: v_sub_nc_u16_e64 v11, s5, v11
+; GFX10-NEXT: v_max_i16_e64 v8, v3, 0
+; GFX10-NEXT: v_sub_nc_u16_e64 v10, 0x8000, v10
+; GFX10-NEXT: v_max_i16_e64 v12, v0, 0
+; GFX10-NEXT: v_add_nc_u16_e64 v2, v2, v5
+; GFX10-NEXT: v_max_i16_e64 v6, v11, v6
+; GFX10-NEXT: v_sub_nc_u16_e64 v5, v9, v8
+; GFX10-NEXT: v_max_i16_e64 v1, v10, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v8, v9, v12
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: v_add_nc_u16_e64 v4, v4, v7
+; GFX10-NEXT: v_and_b32_sdwa v2, sext(v2), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_min_i16_e64 v5, v6, v5
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v8
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_ashrrev_i16_e64 v4, 8, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; GFX10-NEXT: v_add_nc_u16_e64 v3, v3, v5
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_and_or_b32 v1, v4, s4, v2
+; GFX10-NEXT: v_and_b32_sdwa v2, sext(v3), s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v0, sext(v0), s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_or3_b32 v0, v1, v2, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
+
+define amdgpu_ps i32 @s_saddsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
+; GFX6-LABEL: s_saddsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshr_b32 s3, s0, 16
+; GFX6-NEXT: s_lshr_b32 s4, s0, 24
+; GFX6-NEXT: s_lshr_b32 s5, s1, 8
+; GFX6-NEXT: s_lshr_b32 s6, s1, 16
+; GFX6-NEXT: s_lshr_b32 s7, s1, 24
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s8, -2
+; GFX6-NEXT: s_cselect_b32 s10, s0, 0
+; GFX6-NEXT: s_sub_i32 s10, s8, s10
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s9, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s11, s0, 0
+; GFX6-NEXT: s_sub_i32 s11, s9, s11
+; GFX6-NEXT: s_cmp_gt_i32 s11, s1
+; GFX6-NEXT: s_cselect_b32 s1, s11, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s10
+; GFX6-NEXT: s_cselect_b32 s1, s1, s10
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_ashr_i32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s2, s5, 24
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s5, s1, 0
+; GFX6-NEXT: s_sub_i32 s5, s8, s5
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s10, s1, 0
+; GFX6-NEXT: s_sub_i32 s10, s9, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s2
+; GFX6-NEXT: s_cselect_b32 s2, s10, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s5
+; GFX6-NEXT: s_cselect_b32 s2, s2, s5
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_ashr_i32 s1, s1, 24
+; GFX6-NEXT: s_lshl_b32 s3, s6, 24
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s5, s2, 0
+; GFX6-NEXT: s_sub_i32 s5, s8, s5
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s6, s2, 0
+; GFX6-NEXT: s_sub_i32 s6, s9, s6
+; GFX6-NEXT: s_cmp_gt_i32 s6, s3
+; GFX6-NEXT: s_cselect_b32 s3, s6, s3
+; GFX6-NEXT: s_cmp_lt_i32 s3, s5
+; GFX6-NEXT: s_cselect_b32 s3, s3, s5
+; GFX6-NEXT: s_add_i32 s2, s2, s3
+; GFX6-NEXT: s_lshl_b32 s3, s4, 24
+; GFX6-NEXT: s_ashr_i32 s2, s2, 24
+; GFX6-NEXT: s_lshl_b32 s4, s7, 24
+; GFX6-NEXT: s_cmp_gt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s5, s3, 0
+; GFX6-NEXT: s_sub_i32 s5, s8, s5
+; GFX6-NEXT: s_cmp_lt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s6, s3, 0
+; GFX6-NEXT: s_sub_i32 s6, s9, s6
+; GFX6-NEXT: s_cmp_gt_i32 s6, s4
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_ashr_i32 s3, s3, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s3, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshr_b32 s3, s0, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 24
+; GFX8-NEXT: s_lshl_b32 s0, s0, s8
+; GFX8-NEXT: s_lshr_b32 s5, s1, 8
+; GFX8-NEXT: s_lshr_b32 s6, s1, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 24
+; GFX8-NEXT: s_lshl_b32 s1, s1, s8
+; GFX8-NEXT: s_sext_i32_i16 s11, s0
+; GFX8-NEXT: s_sext_i32_i16 s12, 0
+; GFX8-NEXT: s_cmp_gt_i32 s11, s12
+; GFX8-NEXT: s_movk_i32 s9, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s13, s11, s12
+; GFX8-NEXT: s_sub_i32 s13, s9, s13
+; GFX8-NEXT: s_cmp_lt_i32 s11, s12
+; GFX8-NEXT: s_mov_b32 s10, 0x8000
+; GFX8-NEXT: s_cselect_b32 s11, s11, s12
+; GFX8-NEXT: s_sub_i32 s11, s10, s11
+; GFX8-NEXT: s_sext_i32_i16 s11, s11
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s11, s1
+; GFX8-NEXT: s_cselect_b32 s1, s11, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s11, s13
+; GFX8-NEXT: s_cmp_lt_i32 s1, s11
+; GFX8-NEXT: s_cselect_b32 s1, s1, s11
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_lshl_b32 s1, s2, s8
+; GFX8-NEXT: s_lshl_b32 s2, s5, s8
+; GFX8-NEXT: s_ashr_i32 s0, s0, s8
+; GFX8-NEXT: s_sext_i32_i16 s5, s1
+; GFX8-NEXT: s_cmp_gt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s11, s5, s12
+; GFX8-NEXT: s_sub_i32 s11, s9, s11
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_sub_i32 s5, s10, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_gt_i32 s5, s2
+; GFX8-NEXT: s_cselect_b32 s2, s5, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s5, s11
+; GFX8-NEXT: s_cmp_lt_i32 s2, s5
+; GFX8-NEXT: s_cselect_b32 s2, s2, s5
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_lshl_b32 s2, s3, s8
+; GFX8-NEXT: s_lshl_b32 s3, s6, s8
+; GFX8-NEXT: s_ashr_i32 s1, s1, s8
+; GFX8-NEXT: s_sext_i32_i16 s5, s2
+; GFX8-NEXT: s_cmp_gt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s6, s5, s12
+; GFX8-NEXT: s_sub_i32 s6, s9, s6
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_sub_i32 s5, s10, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s5, s3
+; GFX8-NEXT: s_cselect_b32 s3, s5, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s5, s6
+; GFX8-NEXT: s_cmp_lt_i32 s3, s5
+; GFX8-NEXT: s_cselect_b32 s3, s3, s5
+; GFX8-NEXT: s_add_i32 s2, s2, s3
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_lshl_b32 s3, s4, s8
+; GFX8-NEXT: s_lshl_b32 s4, s7, s8
+; GFX8-NEXT: s_ashr_i32 s2, s2, s8
+; GFX8-NEXT: s_sext_i32_i16 s5, s3
+; GFX8-NEXT: s_cmp_gt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s6, s5, s12
+; GFX8-NEXT: s_sub_i32 s6, s9, s6
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_sub_i32 s5, s10, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_gt_i32 s5, s4
+; GFX8-NEXT: s_cselect_b32 s4, s5, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s6
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s3, s3, s4
+; GFX8-NEXT: s_movk_i32 s4, 0xff
+; GFX8-NEXT: s_and_b32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s0, s0, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 8
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_ashr_i32 s3, s3, s8
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s3, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 24
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshr_b32 s3, s0, 16
+; GFX9-NEXT: s_lshr_b32 s4, s0, 24
+; GFX9-NEXT: s_lshl_b32 s0, s0, s8
+; GFX9-NEXT: s_lshr_b32 s5, s1, 8
+; GFX9-NEXT: s_lshr_b32 s6, s1, 16
+; GFX9-NEXT: s_lshr_b32 s7, s1, 24
+; GFX9-NEXT: s_lshl_b32 s1, s1, s8
+; GFX9-NEXT: s_sext_i32_i16 s11, s0
+; GFX9-NEXT: s_sext_i32_i16 s12, 0
+; GFX9-NEXT: s_cmp_gt_i32 s11, s12
+; GFX9-NEXT: s_movk_i32 s9, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s13, s11, s12
+; GFX9-NEXT: s_sub_i32 s13, s9, s13
+; GFX9-NEXT: s_cmp_lt_i32 s11, s12
+; GFX9-NEXT: s_mov_b32 s10, 0x8000
+; GFX9-NEXT: s_cselect_b32 s11, s11, s12
+; GFX9-NEXT: s_sub_i32 s11, s10, s11
+; GFX9-NEXT: s_sext_i32_i16 s11, s11
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s11, s1
+; GFX9-NEXT: s_cselect_b32 s1, s11, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s11, s13
+; GFX9-NEXT: s_cmp_lt_i32 s1, s11
+; GFX9-NEXT: s_cselect_b32 s1, s1, s11
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_lshl_b32 s1, s2, s8
+; GFX9-NEXT: s_lshl_b32 s2, s5, s8
+; GFX9-NEXT: s_ashr_i32 s0, s0, s8
+; GFX9-NEXT: s_sext_i32_i16 s5, s1
+; GFX9-NEXT: s_cmp_gt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s11, s5, s12
+; GFX9-NEXT: s_sub_i32 s11, s9, s11
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_sub_i32 s5, s10, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s5
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_cmp_gt_i32 s5, s2
+; GFX9-NEXT: s_cselect_b32 s2, s5, s2
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_sext_i32_i16 s5, s11
+; GFX9-NEXT: s_cmp_lt_i32 s2, s5
+; GFX9-NEXT: s_cselect_b32 s2, s2, s5
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_lshl_b32 s2, s3, s8
+; GFX9-NEXT: s_lshl_b32 s3, s6, s8
+; GFX9-NEXT: s_ashr_i32 s1, s1, s8
+; GFX9-NEXT: s_sext_i32_i16 s5, s2
+; GFX9-NEXT: s_cmp_gt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s6, s5, s12
+; GFX9-NEXT: s_sub_i32 s6, s9, s6
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_sub_i32 s5, s10, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s5
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_cmp_gt_i32 s5, s3
+; GFX9-NEXT: s_cselect_b32 s3, s5, s3
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_sext_i32_i16 s5, s6
+; GFX9-NEXT: s_cmp_lt_i32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s3, s3, s5
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_lshl_b32 s3, s4, s8
+; GFX9-NEXT: s_lshl_b32 s4, s7, s8
+; GFX9-NEXT: s_ashr_i32 s2, s2, s8
+; GFX9-NEXT: s_sext_i32_i16 s5, s3
+; GFX9-NEXT: s_cmp_gt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s6, s5, s12
+; GFX9-NEXT: s_sub_i32 s6, s9, s6
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_sub_i32 s5, s10, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s5
+; GFX9-NEXT: s_sext_i32_i16 s4, s4
+; GFX9-NEXT: s_cmp_gt_i32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s4
+; GFX9-NEXT: s_sext_i32_i16 s5, s6
+; GFX9-NEXT: s_cmp_lt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: s_and_b32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s0, s0, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, s8
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s3, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 24
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s6, 8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s2, s0, 8
+; GFX10-NEXT: s_lshr_b32 s3, s0, 16
+; GFX10-NEXT: s_lshr_b32 s4, s0, 24
+; GFX10-NEXT: s_lshl_b32 s0, s0, s6
+; GFX10-NEXT: s_sext_i32_i16 s10, 0
+; GFX10-NEXT: s_sext_i32_i16 s9, s0
+; GFX10-NEXT: s_lshr_b32 s5, s1, 8
+; GFX10-NEXT: s_lshr_b32 s7, s1, 16
+; GFX10-NEXT: s_lshr_b32 s8, s1, 24
+; GFX10-NEXT: s_lshl_b32 s1, s1, s6
+; GFX10-NEXT: s_cmp_gt_i32 s9, s10
+; GFX10-NEXT: s_movk_i32 s11, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s12, s9, s10
+; GFX10-NEXT: s_mov_b32 s13, 0x8000
+; GFX10-NEXT: s_sub_i32 s12, s11, s12
+; GFX10-NEXT: s_cmp_lt_i32 s9, s10
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cselect_b32 s9, s9, s10
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_sub_i32 s9, s13, s9
+; GFX10-NEXT: s_sext_i32_i16 s9, s9
+; GFX10-NEXT: s_cmp_gt_i32 s9, s1
+; GFX10-NEXT: s_cselect_b32 s1, s9, s1
+; GFX10-NEXT: s_sext_i32_i16 s9, s12
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s9
+; GFX10-NEXT: s_cselect_b32 s1, s1, s9
+; GFX10-NEXT: s_lshl_b32 s5, s5, s6
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s2, s6
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_sext_i32_i16 s2, s1
+; GFX10-NEXT: s_ashr_i32 s0, s0, s6
+; GFX10-NEXT: s_cmp_gt_i32 s2, s10
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cselect_b32 s9, s2, s10
+; GFX10-NEXT: s_sub_i32 s9, s11, s9
+; GFX10-NEXT: s_cmp_lt_i32 s2, s10
+; GFX10-NEXT: s_cselect_b32 s2, s2, s10
+; GFX10-NEXT: s_sub_i32 s2, s13, s2
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cmp_gt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_sext_i32_i16 s5, s9
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cmp_lt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_lshl_b32 s3, s3, s6
+; GFX10-NEXT: s_add_i32 s1, s1, s2
+; GFX10-NEXT: s_sext_i32_i16 s5, s3
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_lshl_b32 s2, s7, s6
+; GFX10-NEXT: s_ashr_i32 s1, s1, s6
+; GFX10-NEXT: s_cmp_gt_i32 s5, s10
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cselect_b32 s7, s5, s10
+; GFX10-NEXT: s_sub_i32 s7, s11, s7
+; GFX10-NEXT: s_cmp_lt_i32 s5, s10
+; GFX10-NEXT: s_cselect_b32 s5, s5, s10
+; GFX10-NEXT: s_sub_i32 s5, s13, s5
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cmp_gt_i32 s5, s2
+; GFX10-NEXT: s_cselect_b32 s2, s5, s2
+; GFX10-NEXT: s_sext_i32_i16 s5, s7
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cmp_lt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_lshl_b32 s4, s4, s6
+; GFX10-NEXT: s_add_i32 s3, s3, s2
+; GFX10-NEXT: s_sext_i32_i16 s5, s4
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_lshl_b32 s2, s8, s6
+; GFX10-NEXT: s_ashr_i32 s3, s3, s6
+; GFX10-NEXT: s_cmp_gt_i32 s5, s10
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cselect_b32 s7, s5, s10
+; GFX10-NEXT: s_sub_i32 s7, s11, s7
+; GFX10-NEXT: s_cmp_lt_i32 s5, s10
+; GFX10-NEXT: s_cselect_b32 s5, s5, s10
+; GFX10-NEXT: s_sub_i32 s5, s13, s5
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cmp_gt_i32 s5, s2
+; GFX10-NEXT: s_cselect_b32 s2, s5, s2
+; GFX10-NEXT: s_sext_i32_i16 s5, s7
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_movk_i32 s7, 0xff
+; GFX10-NEXT: s_cmp_lt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_and_b32 s1, s1, s7
+; GFX10-NEXT: s_add_i32 s4, s4, s2
+; GFX10-NEXT: s_and_b32 s2, s3, s7
+; GFX10-NEXT: s_sext_i32_i16 s3, s4
+; GFX10-NEXT: s_and_b32 s0, s0, s7
+; GFX10-NEXT: s_lshl_b32 s1, s1, 8
+; GFX10-NEXT: s_ashr_i32 s3, s3, s6
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s2, 16
+; GFX10-NEXT: s_and_b32 s2, s3, s7
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s2, 24
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
+
+define i24 @v_saddsat_i24(i24 %lhs, i24 %rhs) {
+; GFX6-LABEL: v_saddsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX6-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 8, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v0, v1
+; GFX8-NEXT: v_bfe_i32 v3, v2, 0, 24
+; GFX8-NEXT: v_bfe_i32 v0, v0, 0, 24
+; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0
+; GFX8-NEXT: v_bfe_i32 v0, v1, 0, 24
+; GFX8-NEXT: s_bfe_i32 s6, 0, 0x180000
+; GFX8-NEXT: v_cmp_gt_i32_e64 s[6:7], s6, v0
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 23, v3
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0xff800000, v0
+; GFX8-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX9-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_sub_u32_e32 v3, 0x80000000, v3
+; GFX9-NEXT: v_sub_u32_e32 v2, 0x7fffffff, v2
+; GFX9-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i32_e32 v2, 0, v0
+; GFX10-NEXT: v_max_i32_e32 v3, 0, v0
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, 0x80000000, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, 0x7fffffff, v3
+; GFX10-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: v_ashrrev_i32_e32 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i24 @llvm.sadd.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
+
+define amdgpu_ps i24 @s_saddsat_i24(i24 inreg %lhs, i24 inreg %rhs) {
+; GFX6-LABEL: s_saddsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s3, s0, 0
+; GFX6-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX6-NEXT: s_cmp_gt_i32 s3, s1
+; GFX6-NEXT: s_cselect_b32 s1, s3, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s1, s1, s2
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 8
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_i32 s2, s0, s1
+; GFX8-NEXT: s_bfe_i32 s3, s2, 0x180000
+; GFX8-NEXT: s_bfe_i32 s0, s0, 0x180000
+; GFX8-NEXT: s_cmp_lt_i32 s3, s0
+; GFX8-NEXT: s_cselect_b32 s0, 1, 0
+; GFX8-NEXT: s_bfe_i32 s1, s1, 0x180000
+; GFX8-NEXT: s_bfe_i32 s4, 0, 0x180000
+; GFX8-NEXT: s_cmp_lt_i32 s1, s4
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_xor_b32 s0, s1, s0
+; GFX8-NEXT: s_ashr_i32 s1, s3, 23
+; GFX8-NEXT: s_add_i32 s1, s1, 0xff800000
+; GFX8-NEXT: s_and_b32 s0, s0, 1
+; GFX8-NEXT: s_cmp_lg_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s0, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_lshl_b32 s0, s0, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s2, s0, 0
+; GFX9-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s3, s0, 0
+; GFX9-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX9-NEXT: s_cmp_gt_i32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_cmp_lt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s1, s1, s2
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_ashr_i32 s0, s0, 8
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_lshl_b32 s0, s0, 8
+; GFX10-NEXT: s_lshl_b32 s1, s1, 8
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s2, s0, 0
+; GFX10-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: s_cselect_b32 s3, s0, 0
+; GFX10-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_ashr_i32 s0, s0, 8
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i24 @llvm.sadd.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
+
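For i24 the GFX8 path above takes the other expansion: do the wrapping add, detect signed overflow by comparing signs, and select a boundary value built from the wrapped sum's sign bit (the s_ashr_i32 by 23 followed by the 0xff800000, i.e. -(1 << 23), addend). A rough C++ rendering of that sequence, with BW and the helper names as illustrative assumptions rather than anything from the patch:

    #include <cstdint>

    // Sign-extend the low BW bits of x (s_bfe_i32 in the checks above);
    // valid for BW < 32.
    int32_t sext(uint32_t x, unsigned BW) {
      uint32_t m = 1u << (BW - 1);
      x &= (1u << BW) - 1;
      return int32_t((x ^ m) - m);
    }

    // Overflow-based saturating add at BW < 32 bits. Only the low BW
    // bits of the result are meaningful, as in the assembly above.
    int32_t sadd_sat_bw(int32_t lhs, int32_t rhs, unsigned BW) {
      int32_t sum = sext(uint32_t(lhs) + uint32_t(rhs), BW);
      // Signed overflow iff the sum moved the "wrong way" relative to
      // the sign of rhs.
      bool ov = (sum < lhs) != (rhs < 0);
      // (sum >> (BW-1)) is 0 or -1; subtracting 2^(BW-1) turns that
      // into MIN on negative overflow and, modulo 2^BW, MAX on positive
      // overflow -- the ashr-then-add-0xff800000 trick for BW = 24.
      int32_t clamped = (sum >> (BW - 1)) - (int32_t(1) << (BW - 1));
      return ov ? clamped : sum;
    }

GFX6/GFX9/GFX10 instead reuse the min/max clamp at 32 bits after shifting the i24 operands up by 8, the same widening trick as the i8 cases.
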
+define i32 @v_saddsat_i32(i32 %lhs, i32 %rhs) {
+; GFX6-LABEL: v_saddsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX8-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, 0x80000000, v3
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 0x7fffffff, v2
+; GFX8-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX9-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v3, 0x80000000, v3
+; GFX9-NEXT: v_sub_u32_e32 v2, 0x7fffffff, v2
+; GFX9-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_i32_e32 v2, 0, v0
+; GFX10-NEXT: v_max_i32_e32 v3, 0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, 0x80000000, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, 0x7fffffff, v3
+; GFX10-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i32 @llvm.sadd.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
+
+define amdgpu_ps i32 @s_saddsat_i32(i32 inreg %lhs, i32 inreg %rhs) {
+; GCN-LABEL: s_saddsat_i32:
+; GCN: ; %bb.0:
+; GCN-NEXT: s_cmp_gt_i32 s0, 0
+; GCN-NEXT: ; implicit-def: $vcc_hi
+; GCN-NEXT: s_cselect_b32 s2, s0, 0
+; GCN-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GCN-NEXT: s_cmp_lt_i32 s0, 0
+; GCN-NEXT: s_cselect_b32 s3, s0, 0
+; GCN-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GCN-NEXT: s_cmp_gt_i32 s3, s1
+; GCN-NEXT: s_cselect_b32 s1, s3, s1
+; GCN-NEXT: s_cmp_lt_i32 s1, s2
+; GCN-NEXT: s_cselect_b32 s1, s1, s2
+; GCN-NEXT: s_add_i32 s0, s0, s1
+; GCN-NEXT: ; return to shader part epilog
+; GFX6-LABEL: s_saddsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s3, s0, 0
+; GFX6-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX6-NEXT: s_cmp_gt_i32 s3, s1
+; GFX6-NEXT: s_cselect_b32 s1, s3, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s1, s1, s2
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s2, s0, 0
+; GFX8-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX8-NEXT: s_cmp_lt_i32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s3, s0, 0
+; GFX8-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX8-NEXT: s_cmp_gt_i32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_cmp_lt_i32 s1, s2
+; GFX8-NEXT: s_cselect_b32 s1, s1, s2
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s2, s0, 0
+; GFX9-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s3, s0, 0
+; GFX9-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX9-NEXT: s_cmp_gt_i32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_cmp_lt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s1, s1, s2
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s2, s0, 0
+; GFX10-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: s_cselect_b32 s3, s0, 0
+; GFX10-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.sadd.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
+
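The plain i32 checks above are the cleanest view of the min/max-based expansion: compute how much headroom the LHS leaves in each direction, clamp the RHS into that window, then add. A minimal C++ equivalent, as a sketch of the pattern the checks encode rather than code from the patch:

    #include <algorithm>
    #include <cstdint>
    #include <limits>

    // hi = INT32_MAX - max(lhs, 0) and lo = INT32_MIN - min(lhs, 0) are
    // the largest and smallest rhs that still add without overflow; both
    // subtractions and the final add stay in range by construction.
    int32_t sadd_sat_i32(int32_t lhs, int32_t rhs) {
      int32_t hi = std::numeric_limits<int32_t>::max() - std::max(lhs, 0);
      int32_t lo = std::numeric_limits<int32_t>::min() - std::min(lhs, 0);
      return lhs + std::min(std::max(rhs, lo), hi);
    }

The v_max/v_sub/v_max/v_min/v_add chains in the vector tests before and after this point are the same computation, one instance per lane.
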
+define amdgpu_ps float @saddsat_i32_sv(i32 inreg %lhs, i32 %rhs) {
+; GFX6-LABEL: saddsat_i32_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s1, s0, 0
+; GFX6-NEXT: s_sub_i32 s1, 0x7fffffff, s1
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: s_sub_i32 s2, 0x80000000, s2
+; GFX6-NEXT: v_max_i32_e32 v0, s2, v0
+; GFX6-NEXT: v_min_i32_e32 v0, s1, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i32_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s1, s0, 0
+; GFX8-NEXT: s_sub_i32 s1, 0x7fffffff, s1
+; GFX8-NEXT: s_cmp_lt_i32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s2, s0, 0
+; GFX8-NEXT: s_sub_i32 s2, 0x80000000, s2
+; GFX8-NEXT: v_max_i32_e32 v0, s2, v0
+; GFX8-NEXT: v_min_i32_e32 v0, s1, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i32_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s1, s0, 0
+; GFX9-NEXT: s_sub_i32 s1, 0x7fffffff, s1
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s2, s0, 0
+; GFX9-NEXT: s_sub_i32 s2, 0x80000000, s2
+; GFX9-NEXT: v_max_i32_e32 v0, s2, v0
+; GFX9-NEXT: v_min_i32_e32 v0, s1, v0
+; GFX9-NEXT: v_add_u32_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i32_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s1, s0, 0
+; GFX10-NEXT: s_sub_i32 s1, 0x7fffffff, s1
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: s_cselect_b32 s2, s0, 0
+; GFX10-NEXT: s_sub_i32 s2, 0x80000000, s2
+; GFX10-NEXT: v_max_i32_e32 v0, s2, v0
+; GFX10-NEXT: v_min_i32_e32 v0, s1, v0
+; GFX10-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.sadd.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @saddsat_i32_vs(i32 %lhs, i32 inreg %rhs) {
+; GFX6-LABEL: saddsat_i32_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_min_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v1, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0x80000000, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0x7fffffff, v1
+; GFX6-NEXT: v_max_i32_e32 v2, s0, v2
+; GFX6-NEXT: v_min_i32_e32 v1, v2, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i32_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_min_i32_e32 v2, 0, v0
+; GFX8-NEXT: v_max_i32_e32 v1, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, 0x80000000, v2
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, 0x7fffffff, v1
+; GFX8-NEXT: v_max_i32_e32 v2, s0, v2
+; GFX8-NEXT: v_min_i32_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i32_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_i32_e32 v2, 0, v0
+; GFX9-NEXT: v_max_i32_e32 v1, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v2, 0x80000000, v2
+; GFX9-NEXT: v_sub_u32_e32 v1, 0x7fffffff, v1
+; GFX9-NEXT: v_max_i32_e32 v2, s0, v2
+; GFX9-NEXT: v_min_i32_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i32_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_min_i32_e32 v1, 0, v0
+; GFX10-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, 0x80000000, v1
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, 0x7fffffff, v2
+; GFX10-NEXT: v_max_i32_e32 v1, s0, v1
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.sadd.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
+
+define <2 x i32> @v_saddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; GFX6-LABEL: v_saddsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v5, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v4, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: v_max_i32_e32 v2, v5, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX6-NEXT: v_min_i32_e32 v4, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s4, v2
+; GFX6-NEXT: v_max_i32_e32 v3, v4, v3
+; GFX6-NEXT: v_min_i32_e32 v2, v3, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v5, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, s5, v5
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v4, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, s4, v4
+; GFX8-NEXT: v_max_i32_e32 v2, v5, v2
+; GFX8-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX8-NEXT: v_min_i32_e32 v4, 0, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_max_i32_e32 v2, 0, v1
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, s5, v4
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s4, v2
+; GFX8-NEXT: v_max_i32_e32 v3, v4, v3
+; GFX8-NEXT: v_min_i32_e32 v2, v3, v2
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v5, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v5, s5, v5
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v4, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v4, s4, v4
+; GFX9-NEXT: v_max_i32_e32 v2, v5, v2
+; GFX9-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX9-NEXT: v_min_i32_e32 v4, 0, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_i32_e32 v2, 0, v1
+; GFX9-NEXT: v_sub_u32_e32 v4, s5, v4
+; GFX9-NEXT: v_sub_u32_e32 v2, s4, v2
+; GFX9-NEXT: v_max_i32_e32 v3, v4, v3
+; GFX9-NEXT: v_min_i32_e32 v2, v3, v2
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_i32_e32 v4, 0, v0
+; GFX10-NEXT: v_min_i32_e32 v5, 0, v1
+; GFX10-NEXT: s_mov_b32 s4, 0x80000000
+; GFX10-NEXT: v_max_i32_e32 v6, 0, v0
+; GFX10-NEXT: v_max_i32_e32 v7, 0, v1
+; GFX10-NEXT: v_sub_nc_u32_e32 v4, s4, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v5, s4, v5
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v6, s4, v6
+; GFX10-NEXT: v_max_i32_e32 v11, v4, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v7, s4, v7
+; GFX10-NEXT: v_max_i32_e32 v10, v5, v3
+; GFX10-NEXT: v_min_i32_e32 v2, v11, v6
+; GFX10-NEXT: v_min_i32_e32 v3, v10, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
+define amdgpu_ps <2 x i32> @s_saddsat_v2i32(<2 x i32> inreg %lhs, <2 x i32> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: s_cselect_b32 s6, s0, 0
+; GFX6-NEXT: s_sub_i32 s6, s4, s6
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s7, s0, 0
+; GFX6-NEXT: s_sub_i32 s7, s5, s7
+; GFX6-NEXT: s_cmp_gt_i32 s7, s2
+; GFX6-NEXT: s_cselect_b32 s2, s7, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s6
+; GFX6-NEXT: s_cselect_b32 s2, s2, s6
+; GFX6-NEXT: s_add_i32 s0, s0, s2
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s2, s1, 0
+; GFX6-NEXT: s_sub_i32 s2, s4, s2
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s4, s1, 0
+; GFX6-NEXT: s_sub_i32 s4, s5, s4
+; GFX6-NEXT: s_cmp_gt_i32 s4, s3
+; GFX6-NEXT: s_cselect_b32 s3, s4, s3
+; GFX6-NEXT: s_cmp_lt_i32 s3, s2
+; GFX6-NEXT: s_cselect_b32 s2, s3, s2
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, 0
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: s_cselect_b32 s6, s0, 0
+; GFX8-NEXT: s_sub_i32 s6, s4, s6
+; GFX8-NEXT: s_cmp_lt_i32 s0, 0
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s7, s0, 0
+; GFX8-NEXT: s_sub_i32 s7, s5, s7
+; GFX8-NEXT: s_cmp_gt_i32 s7, s2
+; GFX8-NEXT: s_cselect_b32 s2, s7, s2
+; GFX8-NEXT: s_cmp_lt_i32 s2, s6
+; GFX8-NEXT: s_cselect_b32 s2, s2, s6
+; GFX8-NEXT: s_add_i32 s0, s0, s2
+; GFX8-NEXT: s_cmp_gt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s2, s1, 0
+; GFX8-NEXT: s_sub_i32 s2, s4, s2
+; GFX8-NEXT: s_cmp_lt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s4, s1, 0
+; GFX8-NEXT: s_sub_i32 s4, s5, s4
+; GFX8-NEXT: s_cmp_gt_i32 s4, s3
+; GFX8-NEXT: s_cselect_b32 s3, s4, s3
+; GFX8-NEXT: s_cmp_lt_i32 s3, s2
+; GFX8-NEXT: s_cselect_b32 s2, s3, s2
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: s_cselect_b32 s6, s0, 0
+; GFX9-NEXT: s_sub_i32 s6, s4, s6
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s7, s0, 0
+; GFX9-NEXT: s_sub_i32 s7, s5, s7
+; GFX9-NEXT: s_cmp_gt_i32 s7, s2
+; GFX9-NEXT: s_cselect_b32 s2, s7, s2
+; GFX9-NEXT: s_cmp_lt_i32 s2, s6
+; GFX9-NEXT: s_cselect_b32 s2, s2, s6
+; GFX9-NEXT: s_add_i32 s0, s0, s2
+; GFX9-NEXT: s_cmp_gt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s2, s1, 0
+; GFX9-NEXT: s_sub_i32 s2, s4, s2
+; GFX9-NEXT: s_cmp_lt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s4, s1, 0
+; GFX9-NEXT: s_sub_i32 s4, s5, s4
+; GFX9-NEXT: s_cmp_gt_i32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s4, s3
+; GFX9-NEXT: s_cmp_lt_i32 s3, s2
+; GFX9-NEXT: s_cselect_b32 s2, s3, s2
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: s_cselect_b32 s5, s0, 0
+; GFX10-NEXT: s_mov_b32 s6, 0x80000000
+; GFX10-NEXT: s_sub_i32 s5, s4, s5
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s7, s0, 0
+; GFX10-NEXT: s_sub_i32 s7, s6, s7
+; GFX10-NEXT: s_cmp_gt_i32 s7, s2
+; GFX10-NEXT: s_cselect_b32 s2, s7, s2
+; GFX10-NEXT: s_cmp_lt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_add_i32 s0, s0, s2
+; GFX10-NEXT: s_cmp_gt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s2, s1, 0
+; GFX10-NEXT: s_sub_i32 s2, s4, s2
+; GFX10-NEXT: s_cmp_lt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s4, s1, 0
+; GFX10-NEXT: s_sub_i32 s4, s6, s4
+; GFX10-NEXT: s_cmp_gt_i32 s4, s3
+; GFX10-NEXT: s_cselect_b32 s3, s4, s3
+; GFX10-NEXT: s_cmp_lt_i32 s3, s2
+; GFX10-NEXT: s_cselect_b32 s2, s3, s2
+; GFX10-NEXT: s_add_i32 s1, s1, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @v_saddsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
+; GFX6-LABEL: v_saddsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v7, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, s5, v7
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v6, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s4, v6
+; GFX6-NEXT: v_max_i32_e32 v3, v7, v3
+; GFX6-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX6-NEXT: v_min_i32_e32 v3, v4, v3
+; GFX6-NEXT: v_min_i32_e32 v4, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT: v_max_i32_e32 v3, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_max_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_min_i32_e32 v3, v4, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v7, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v7, vcc, s5, v7
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v6, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s4, v6
+; GFX8-NEXT: v_max_i32_e32 v3, v7, v3
+; GFX8-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX8-NEXT: v_min_i32_e32 v6, 0, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
+; GFX8-NEXT: v_max_i32_e32 v3, 0, v1
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s5, v6
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s4, v3
+; GFX8-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX8-NEXT: v_min_i32_e32 v3, v4, v3
+; GFX8-NEXT: v_min_i32_e32 v4, 0, v2
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, s5, v4
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT: v_max_i32_e32 v3, 0, v2
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s4, v3
+; GFX8-NEXT: v_max_i32_e32 v4, v4, v5
+; GFX8-NEXT: v_min_i32_e32 v3, v4, v3
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v7, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v7, s5, v7
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v6, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v6, s4, v6
+; GFX9-NEXT: v_max_i32_e32 v3, v7, v3
+; GFX9-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX9-NEXT: v_min_i32_e32 v6, 0, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v3
+; GFX9-NEXT: v_max_i32_e32 v3, 0, v1
+; GFX9-NEXT: v_sub_u32_e32 v6, s5, v6
+; GFX9-NEXT: v_sub_u32_e32 v3, s4, v3
+; GFX9-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX9-NEXT: v_min_i32_e32 v3, v4, v3
+; GFX9-NEXT: v_min_i32_e32 v4, 0, v2
+; GFX9-NEXT: v_sub_u32_e32 v4, s5, v4
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_i32_e32 v3, 0, v2
+; GFX9-NEXT: v_sub_u32_e32 v3, s4, v3
+; GFX9-NEXT: v_max_i32_e32 v4, v4, v5
+; GFX9-NEXT: v_min_i32_e32 v3, v4, v3
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_i32_e32 v7, 0, v0
+; GFX10-NEXT: v_min_i32_e32 v8, 0, v1
+; GFX10-NEXT: v_min_i32_e32 v9, 0, v2
+; GFX10-NEXT: s_mov_b32 s5, 0x80000000
+; GFX10-NEXT: v_max_i32_e32 v6, 0, v0
+; GFX10-NEXT: v_sub_nc_u32_e32 v14, s5, v7
+; GFX10-NEXT: v_sub_nc_u32_e32 v15, s5, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v19, s5, v9
+; GFX10-NEXT: v_max_i32_e32 v10, 0, v1
+; GFX10-NEXT: v_max_i32_e32 v11, 0, v2
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: v_max_i32_e32 v3, v14, v3
+; GFX10-NEXT: v_sub_nc_u32_e32 v6, s4, v6
+; GFX10-NEXT: v_sub_nc_u32_e32 v7, s4, v10
+; GFX10-NEXT: v_max_i32_e32 v4, v15, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v8, s4, v11
+; GFX10-NEXT: v_max_i32_e32 v5, v19, v5
+; GFX10-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i32_e32 v4, v4, v7
+; GFX10-NEXT: v_min_i32_e32 v5, v5, v8
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v3
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <3 x i32> @llvm.sadd.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define amdgpu_ps <3 x i32> @s_saddsat_v3i32(<3 x i32> inreg %lhs, <3 x i32> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s6, -2
+; GFX6-NEXT: s_cselect_b32 s8, s0, 0
+; GFX6-NEXT: s_sub_i32 s8, s6, s8
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s7, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s9, s0, 0
+; GFX6-NEXT: s_sub_i32 s9, s7, s9
+; GFX6-NEXT: s_cmp_gt_i32 s9, s3
+; GFX6-NEXT: s_cselect_b32 s3, s9, s3
+; GFX6-NEXT: s_cmp_lt_i32 s3, s8
+; GFX6-NEXT: s_cselect_b32 s3, s3, s8
+; GFX6-NEXT: s_add_i32 s0, s0, s3
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s3, s1, 0
+; GFX6-NEXT: s_sub_i32 s3, s6, s3
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s8, s1, 0
+; GFX6-NEXT: s_sub_i32 s8, s7, s8
+; GFX6-NEXT: s_cmp_gt_i32 s8, s4
+; GFX6-NEXT: s_cselect_b32 s4, s8, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s3
+; GFX6-NEXT: s_cselect_b32 s3, s4, s3
+; GFX6-NEXT: s_add_i32 s1, s1, s3
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s3, s2, 0
+; GFX6-NEXT: s_sub_i32 s3, s6, s3
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s4, s2, 0
+; GFX6-NEXT: s_sub_i32 s4, s7, s4
+; GFX6-NEXT: s_cmp_gt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_cmp_lt_i32 s4, s3
+; GFX6-NEXT: s_cselect_b32 s3, s4, s3
+; GFX6-NEXT: s_add_i32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, 0
+; GFX8-NEXT: s_brev_b32 s6, -2
+; GFX8-NEXT: s_cselect_b32 s8, s0, 0
+; GFX8-NEXT: s_sub_i32 s8, s6, s8
+; GFX8-NEXT: s_cmp_lt_i32 s0, 0
+; GFX8-NEXT: s_mov_b32 s7, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s9, s0, 0
+; GFX8-NEXT: s_sub_i32 s9, s7, s9
+; GFX8-NEXT: s_cmp_gt_i32 s9, s3
+; GFX8-NEXT: s_cselect_b32 s3, s9, s3
+; GFX8-NEXT: s_cmp_lt_i32 s3, s8
+; GFX8-NEXT: s_cselect_b32 s3, s3, s8
+; GFX8-NEXT: s_add_i32 s0, s0, s3
+; GFX8-NEXT: s_cmp_gt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s3, s1, 0
+; GFX8-NEXT: s_sub_i32 s3, s6, s3
+; GFX8-NEXT: s_cmp_lt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s8, s1, 0
+; GFX8-NEXT: s_sub_i32 s8, s7, s8
+; GFX8-NEXT: s_cmp_gt_i32 s8, s4
+; GFX8-NEXT: s_cselect_b32 s4, s8, s4
+; GFX8-NEXT: s_cmp_lt_i32 s4, s3
+; GFX8-NEXT: s_cselect_b32 s3, s4, s3
+; GFX8-NEXT: s_add_i32 s1, s1, s3
+; GFX8-NEXT: s_cmp_gt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s3, s2, 0
+; GFX8-NEXT: s_sub_i32 s3, s6, s3
+; GFX8-NEXT: s_cmp_lt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s4, s2, 0
+; GFX8-NEXT: s_sub_i32 s4, s7, s4
+; GFX8-NEXT: s_cmp_gt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s3
+; GFX8-NEXT: s_cselect_b32 s3, s4, s3
+; GFX8-NEXT: s_add_i32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_brev_b32 s6, -2
+; GFX9-NEXT: s_cselect_b32 s8, s0, 0
+; GFX9-NEXT: s_sub_i32 s8, s6, s8
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_mov_b32 s7, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s9, s0, 0
+; GFX9-NEXT: s_sub_i32 s9, s7, s9
+; GFX9-NEXT: s_cmp_gt_i32 s9, s3
+; GFX9-NEXT: s_cselect_b32 s3, s9, s3
+; GFX9-NEXT: s_cmp_lt_i32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s3, s3, s8
+; GFX9-NEXT: s_add_i32 s0, s0, s3
+; GFX9-NEXT: s_cmp_gt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s3, s1, 0
+; GFX9-NEXT: s_sub_i32 s3, s6, s3
+; GFX9-NEXT: s_cmp_lt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s8, s1, 0
+; GFX9-NEXT: s_sub_i32 s8, s7, s8
+; GFX9-NEXT: s_cmp_gt_i32 s8, s4
+; GFX9-NEXT: s_cselect_b32 s4, s8, s4
+; GFX9-NEXT: s_cmp_lt_i32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s4, s3
+; GFX9-NEXT: s_add_i32 s1, s1, s3
+; GFX9-NEXT: s_cmp_gt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s3, s2, 0
+; GFX9-NEXT: s_sub_i32 s3, s6, s3
+; GFX9-NEXT: s_cmp_lt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s4, s2, 0
+; GFX9-NEXT: s_sub_i32 s4, s7, s4
+; GFX9-NEXT: s_cmp_gt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_lt_i32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s4, s3
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: s_brev_b32 s6, -2
+; GFX10-NEXT: s_cselect_b32 s7, s0, 0
+; GFX10-NEXT: s_mov_b32 s8, 0x80000000
+; GFX10-NEXT: s_sub_i32 s7, s6, s7
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s9, s0, 0
+; GFX10-NEXT: s_sub_i32 s9, s8, s9
+; GFX10-NEXT: s_cmp_gt_i32 s9, s3
+; GFX10-NEXT: s_cselect_b32 s3, s9, s3
+; GFX10-NEXT: s_cmp_lt_i32 s3, s7
+; GFX10-NEXT: s_cselect_b32 s3, s3, s7
+; GFX10-NEXT: s_add_i32 s0, s0, s3
+; GFX10-NEXT: s_cmp_gt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s3, s1, 0
+; GFX10-NEXT: s_sub_i32 s3, s6, s3
+; GFX10-NEXT: s_cmp_lt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s7, s1, 0
+; GFX10-NEXT: s_sub_i32 s7, s8, s7
+; GFX10-NEXT: s_cmp_gt_i32 s7, s4
+; GFX10-NEXT: s_cselect_b32 s4, s7, s4
+; GFX10-NEXT: s_cmp_lt_i32 s4, s3
+; GFX10-NEXT: s_cselect_b32 s3, s4, s3
+; GFX10-NEXT: s_add_i32 s1, s1, s3
+; GFX10-NEXT: s_cmp_gt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s3, s2, 0
+; GFX10-NEXT: s_sub_i32 s3, s6, s3
+; GFX10-NEXT: s_cmp_lt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s4, s2, 0
+; GFX10-NEXT: s_sub_i32 s4, s8, s4
+; GFX10-NEXT: s_cmp_gt_i32 s4, s5
+; GFX10-NEXT: s_cselect_b32 s4, s4, s5
+; GFX10-NEXT: s_cmp_lt_i32 s4, s3
+; GFX10-NEXT: s_cselect_b32 s3, s4, s3
+; GFX10-NEXT: s_add_i32 s2, s2, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <3 x i32> @llvm.sadd.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @v_saddsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; GFX6-LABEL: v_saddsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v9, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, s5, v9
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v8, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s4, v8
+; GFX6-NEXT: v_max_i32_e32 v4, v9, v4
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX6-NEXT: v_min_i32_e32 v8, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_max_i32_e32 v4, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: v_max_i32_e32 v5, v8, v5
+; GFX6-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX6-NEXT: v_min_i32_e32 v5, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_max_i32_e32 v4, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: v_max_i32_e32 v5, v5, v6
+; GFX6-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX6-NEXT: v_min_i32_e32 v5, 0, v3
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, 0x80000000, v5
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_max_i32_e32 v4, 0, v3
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, 0x7fffffff, v4
+; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX6-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v9, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v9, vcc, s5, v9
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v8, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s4, v8
+; GFX8-NEXT: v_max_i32_e32 v4, v9, v4
+; GFX8-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX8-NEXT: v_min_i32_e32 v8, 0, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_max_i32_e32 v4, 0, v1
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, s5, v8
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, s4, v4
+; GFX8-NEXT: v_max_i32_e32 v5, v8, v5
+; GFX8-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX8-NEXT: v_min_i32_e32 v5, 0, v2
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, s5, v5
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v4
+; GFX8-NEXT: v_max_i32_e32 v4, 0, v2
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, s4, v4
+; GFX8-NEXT: v_max_i32_e32 v5, v5, v6
+; GFX8-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX8-NEXT: v_min_i32_e32 v5, 0, v3
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, 0x80000000, v5
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
+; GFX8-NEXT: v_max_i32_e32 v4, 0, v3
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, 0x7fffffff, v4
+; GFX8-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX8-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v9, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v9, s5, v9
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v8, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v8, s4, v8
+; GFX9-NEXT: v_max_i32_e32 v4, v9, v4
+; GFX9-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX9-NEXT: v_min_i32_e32 v8, 0, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_max_i32_e32 v4, 0, v1
+; GFX9-NEXT: v_sub_u32_e32 v8, s5, v8
+; GFX9-NEXT: v_sub_u32_e32 v4, s4, v4
+; GFX9-NEXT: v_max_i32_e32 v5, v8, v5
+; GFX9-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX9-NEXT: v_min_i32_e32 v5, 0, v2
+; GFX9-NEXT: v_sub_u32_e32 v5, s5, v5
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v4
+; GFX9-NEXT: v_max_i32_e32 v4, 0, v2
+; GFX9-NEXT: v_sub_u32_e32 v4, s4, v4
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v6
+; GFX9-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX9-NEXT: v_min_i32_e32 v5, 0, v3
+; GFX9-NEXT: v_sub_u32_e32 v5, 0x80000000, v5
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v4
+; GFX9-NEXT: v_max_i32_e32 v4, 0, v3
+; GFX9-NEXT: v_sub_u32_e32 v4, 0x7fffffff, v4
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX9-NEXT: v_min_i32_e32 v4, v5, v4
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_i32_e32 v8, 0, v0
+; GFX10-NEXT: s_mov_b32 s4, 0x80000000
+; GFX10-NEXT: v_min_i32_e32 v11, 0, v1
+; GFX10-NEXT: v_min_i32_e32 v12, 0, v3
+; GFX10-NEXT: v_max_i32_e32 v9, 0, v0
+; GFX10-NEXT: v_sub_nc_u32_e32 v15, s4, v8
+; GFX10-NEXT: v_min_i32_e32 v8, 0, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v11, s4, v11
+; GFX10-NEXT: v_sub_nc_u32_e32 v12, 0x80000000, v12
+; GFX10-NEXT: v_max_i32_e32 v10, 0, v1
+; GFX10-NEXT: v_max_i32_e32 v13, 0, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v8, s4, v8
+; GFX10-NEXT: v_max_i32_e32 v14, 0, v3
+; GFX10-NEXT: s_brev_b32 s5, -2
+; GFX10-NEXT: v_max_i32_e32 v5, v11, v5
+; GFX10-NEXT: v_sub_nc_u32_e32 v10, s5, v10
+; GFX10-NEXT: v_max_i32_e32 v6, v8, v6
+; GFX10-NEXT: v_sub_nc_u32_e32 v11, s5, v13
+; GFX10-NEXT: v_sub_nc_u32_e32 v9, s5, v9
+; GFX10-NEXT: v_max_i32_e32 v4, v15, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v8, 0x7fffffff, v14
+; GFX10-NEXT: v_max_i32_e32 v7, v12, v7
+; GFX10-NEXT: v_min_i32_e32 v11, v6, v11
+; GFX10-NEXT: v_min_i32_e32 v19, v5, v10
+; GFX10-NEXT: v_min_i32_e32 v15, v4, v9
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i32_e32 v6, v7, v8
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v11
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v19
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v15
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
+
+define amdgpu_ps <4 x i32> @s_saddsat_v4i32(<4 x i32> inreg %lhs, <4 x i32> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s8, -2
+; GFX6-NEXT: s_cselect_b32 s10, s0, 0
+; GFX6-NEXT: s_sub_i32 s10, s8, s10
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s9, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s11, s0, 0
+; GFX6-NEXT: s_sub_i32 s11, s9, s11
+; GFX6-NEXT: s_cmp_gt_i32 s11, s4
+; GFX6-NEXT: s_cselect_b32 s4, s11, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s10
+; GFX6-NEXT: s_cselect_b32 s4, s4, s10
+; GFX6-NEXT: s_add_i32 s0, s0, s4
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s4, s1, 0
+; GFX6-NEXT: s_sub_i32 s4, s8, s4
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s10, s1, 0
+; GFX6-NEXT: s_sub_i32 s10, s9, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s5
+; GFX6-NEXT: s_cselect_b32 s5, s10, s5
+; GFX6-NEXT: s_cmp_lt_i32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_add_i32 s1, s1, s4
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s4, s2, 0
+; GFX6-NEXT: s_sub_i32 s4, s8, s4
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s5, s2, 0
+; GFX6-NEXT: s_sub_i32 s5, s9, s5
+; GFX6-NEXT: s_cmp_gt_i32 s5, s6
+; GFX6-NEXT: s_cselect_b32 s5, s5, s6
+; GFX6-NEXT: s_cmp_lt_i32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_add_i32 s2, s2, s4
+; GFX6-NEXT: s_cmp_gt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s4, s3, 0
+; GFX6-NEXT: s_sub_i32 s4, s8, s4
+; GFX6-NEXT: s_cmp_lt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s5, s3, 0
+; GFX6-NEXT: s_sub_i32 s5, s9, s5
+; GFX6-NEXT: s_cmp_gt_i32 s5, s7
+; GFX6-NEXT: s_cselect_b32 s5, s5, s7
+; GFX6-NEXT: s_cmp_lt_i32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, 0
+; GFX8-NEXT: s_brev_b32 s8, -2
+; GFX8-NEXT: s_cselect_b32 s10, s0, 0
+; GFX8-NEXT: s_sub_i32 s10, s8, s10
+; GFX8-NEXT: s_cmp_lt_i32 s0, 0
+; GFX8-NEXT: s_mov_b32 s9, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s11, s0, 0
+; GFX8-NEXT: s_sub_i32 s11, s9, s11
+; GFX8-NEXT: s_cmp_gt_i32 s11, s4
+; GFX8-NEXT: s_cselect_b32 s4, s11, s4
+; GFX8-NEXT: s_cmp_lt_i32 s4, s10
+; GFX8-NEXT: s_cselect_b32 s4, s4, s10
+; GFX8-NEXT: s_add_i32 s0, s0, s4
+; GFX8-NEXT: s_cmp_gt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s4, s1, 0
+; GFX8-NEXT: s_sub_i32 s4, s8, s4
+; GFX8-NEXT: s_cmp_lt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s10, s1, 0
+; GFX8-NEXT: s_sub_i32 s10, s9, s10
+; GFX8-NEXT: s_cmp_gt_i32 s10, s5
+; GFX8-NEXT: s_cselect_b32 s5, s10, s5
+; GFX8-NEXT: s_cmp_lt_i32 s5, s4
+; GFX8-NEXT: s_cselect_b32 s4, s5, s4
+; GFX8-NEXT: s_add_i32 s1, s1, s4
+; GFX8-NEXT: s_cmp_gt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s4, s2, 0
+; GFX8-NEXT: s_sub_i32 s4, s8, s4
+; GFX8-NEXT: s_cmp_lt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s5, s2, 0
+; GFX8-NEXT: s_sub_i32 s5, s9, s5
+; GFX8-NEXT: s_cmp_gt_i32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_cmp_lt_i32 s5, s4
+; GFX8-NEXT: s_cselect_b32 s4, s5, s4
+; GFX8-NEXT: s_add_i32 s2, s2, s4
+; GFX8-NEXT: s_cmp_gt_i32 s3, 0
+; GFX8-NEXT: s_cselect_b32 s4, s3, 0
+; GFX8-NEXT: s_sub_i32 s4, s8, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, 0
+; GFX8-NEXT: s_cselect_b32 s5, s3, 0
+; GFX8-NEXT: s_sub_i32 s5, s9, s5
+; GFX8-NEXT: s_cmp_gt_i32 s5, s7
+; GFX8-NEXT: s_cselect_b32 s5, s5, s7
+; GFX8-NEXT: s_cmp_lt_i32 s5, s4
+; GFX8-NEXT: s_cselect_b32 s4, s5, s4
+; GFX8-NEXT: s_add_i32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_brev_b32 s8, -2
+; GFX9-NEXT: s_cselect_b32 s10, s0, 0
+; GFX9-NEXT: s_sub_i32 s10, s8, s10
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_mov_b32 s9, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s11, s0, 0
+; GFX9-NEXT: s_sub_i32 s11, s9, s11
+; GFX9-NEXT: s_cmp_gt_i32 s11, s4
+; GFX9-NEXT: s_cselect_b32 s4, s11, s4
+; GFX9-NEXT: s_cmp_lt_i32 s4, s10
+; GFX9-NEXT: s_cselect_b32 s4, s4, s10
+; GFX9-NEXT: s_add_i32 s0, s0, s4
+; GFX9-NEXT: s_cmp_gt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s4, s1, 0
+; GFX9-NEXT: s_sub_i32 s4, s8, s4
+; GFX9-NEXT: s_cmp_lt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s10, s1, 0
+; GFX9-NEXT: s_sub_i32 s10, s9, s10
+; GFX9-NEXT: s_cmp_gt_i32 s10, s5
+; GFX9-NEXT: s_cselect_b32 s5, s10, s5
+; GFX9-NEXT: s_cmp_lt_i32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_add_i32 s1, s1, s4
+; GFX9-NEXT: s_cmp_gt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s4, s2, 0
+; GFX9-NEXT: s_sub_i32 s4, s8, s4
+; GFX9-NEXT: s_cmp_lt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s5, s2, 0
+; GFX9-NEXT: s_sub_i32 s5, s9, s5
+; GFX9-NEXT: s_cmp_gt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_cmp_lt_i32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_add_i32 s2, s2, s4
+; GFX9-NEXT: s_cmp_gt_i32 s3, 0
+; GFX9-NEXT: s_cselect_b32 s4, s3, 0
+; GFX9-NEXT: s_sub_i32 s4, s8, s4
+; GFX9-NEXT: s_cmp_lt_i32 s3, 0
+; GFX9-NEXT: s_cselect_b32 s5, s3, 0
+; GFX9-NEXT: s_sub_i32 s5, s9, s5
+; GFX9-NEXT: s_cmp_gt_i32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_i32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: s_brev_b32 s8, -2
+; GFX10-NEXT: s_cselect_b32 s9, s0, 0
+; GFX10-NEXT: s_mov_b32 s10, 0x80000000
+; GFX10-NEXT: s_sub_i32 s9, s8, s9
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s11, s0, 0
+; GFX10-NEXT: s_sub_i32 s11, s10, s11
+; GFX10-NEXT: s_cmp_gt_i32 s11, s4
+; GFX10-NEXT: s_cselect_b32 s4, s11, s4
+; GFX10-NEXT: s_cmp_lt_i32 s4, s9
+; GFX10-NEXT: s_cselect_b32 s4, s4, s9
+; GFX10-NEXT: s_add_i32 s0, s0, s4
+; GFX10-NEXT: s_cmp_gt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s4, s1, 0
+; GFX10-NEXT: s_sub_i32 s4, s8, s4
+; GFX10-NEXT: s_cmp_lt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s9, s1, 0
+; GFX10-NEXT: s_sub_i32 s9, s10, s9
+; GFX10-NEXT: s_cmp_gt_i32 s9, s5
+; GFX10-NEXT: s_cselect_b32 s5, s9, s5
+; GFX10-NEXT: s_cmp_lt_i32 s5, s4
+; GFX10-NEXT: s_cselect_b32 s4, s5, s4
+; GFX10-NEXT: s_add_i32 s1, s1, s4
+; GFX10-NEXT: s_cmp_gt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s4, s2, 0
+; GFX10-NEXT: s_sub_i32 s4, s8, s4
+; GFX10-NEXT: s_cmp_lt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s5, s2, 0
+; GFX10-NEXT: s_sub_i32 s5, s10, s5
+; GFX10-NEXT: s_cmp_gt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_cmp_lt_i32 s5, s4
+; GFX10-NEXT: s_cselect_b32 s4, s5, s4
+; GFX10-NEXT: s_add_i32 s2, s2, s4
+; GFX10-NEXT: s_cmp_gt_i32 s3, 0
+; GFX10-NEXT: s_cselect_b32 s4, s3, 0
+; GFX10-NEXT: s_sub_i32 s4, s8, s4
+; GFX10-NEXT: s_cmp_lt_i32 s3, 0
+; GFX10-NEXT: s_cselect_b32 s5, s3, 0
+; GFX10-NEXT: s_sub_i32 s5, s10, s5
+; GFX10-NEXT: s_cmp_gt_i32 s5, s7
+; GFX10-NEXT: s_cselect_b32 s5, s5, s7
+; GFX10-NEXT: s_cmp_lt_i32 s5, s4
+; GFX10-NEXT: s_cselect_b32 s4, s5, s4
+; GFX10-NEXT: s_add_i32 s3, s3, s4
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
+
+define <5 x i32> @v_saddsat_v5i32(<5 x i32> %lhs, <5 x i32> %rhs) {
+; GFX6-LABEL: v_saddsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v12, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v12, vcc, s5, v12
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v10, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, s4, v10
+; GFX6-NEXT: v_max_i32_e32 v5, v12, v5
+; GFX6-NEXT: v_min_i32_e32 v5, v5, v10
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v5
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_max_i32_e32 v6, v10, v6
+; GFX6-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_max_i32_e32 v6, v6, v7
+; GFX6-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX6-NEXT: v_mov_b32_e32 v13, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v3
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v13, v6
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; GFX6-NEXT: v_bfrev_b32_e32 v11, -2
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v3
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v11, v5
+; GFX6-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX6-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v4
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v13, v6
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v4
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v11, v5
+; GFX6-NEXT: v_max_i32_e32 v6, v6, v9
+; GFX6-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v12, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v12, vcc, s5, v12
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v10, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v10, vcc, s4, v10
+; GFX8-NEXT: v_max_i32_e32 v5, v12, v5
+; GFX8-NEXT: v_min_i32_e32 v5, v5, v10
+; GFX8-NEXT: v_min_i32_e32 v10, 0, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v5
+; GFX8-NEXT: v_max_i32_e32 v5, 0, v1
+; GFX8-NEXT: v_sub_u32_e32 v10, vcc, s5, v10
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, s4, v5
+; GFX8-NEXT: v_max_i32_e32 v6, v10, v6
+; GFX8-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX8-NEXT: v_min_i32_e32 v6, 0, v2
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, s5, v6
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v5
+; GFX8-NEXT: v_max_i32_e32 v5, 0, v2
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, s4, v5
+; GFX8-NEXT: v_max_i32_e32 v6, v6, v7
+; GFX8-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX8-NEXT: v_mov_b32_e32 v13, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v6, 0, v3
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, v13, v6
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
+; GFX8-NEXT: v_bfrev_b32_e32 v11, -2
+; GFX8-NEXT: v_max_i32_e32 v5, 0, v3
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, v11, v5
+; GFX8-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX8-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX8-NEXT: v_min_i32_e32 v6, 0, v4
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, v13, v6
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
+; GFX8-NEXT: v_max_i32_e32 v5, 0, v4
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, v11, v5
+; GFX8-NEXT: v_max_i32_e32 v6, v6, v9
+; GFX8-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v12, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v12, s5, v12
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v10, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v10, s4, v10
+; GFX9-NEXT: v_max_i32_e32 v5, v12, v5
+; GFX9-NEXT: v_min_i32_e32 v5, v5, v10
+; GFX9-NEXT: v_min_i32_e32 v10, 0, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v5
+; GFX9-NEXT: v_max_i32_e32 v5, 0, v1
+; GFX9-NEXT: v_sub_u32_e32 v10, s5, v10
+; GFX9-NEXT: v_sub_u32_e32 v5, s4, v5
+; GFX9-NEXT: v_max_i32_e32 v6, v10, v6
+; GFX9-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX9-NEXT: v_min_i32_e32 v6, 0, v2
+; GFX9-NEXT: v_sub_u32_e32 v6, s5, v6
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_max_i32_e32 v5, 0, v2
+; GFX9-NEXT: v_sub_u32_e32 v5, s4, v5
+; GFX9-NEXT: v_max_i32_e32 v6, v6, v7
+; GFX9-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX9-NEXT: v_mov_b32_e32 v13, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v6, 0, v3
+; GFX9-NEXT: v_sub_u32_e32 v6, v13, v6
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v5
+; GFX9-NEXT: v_bfrev_b32_e32 v11, -2
+; GFX9-NEXT: v_max_i32_e32 v5, 0, v3
+; GFX9-NEXT: v_sub_u32_e32 v5, v11, v5
+; GFX9-NEXT: v_max_i32_e32 v6, v6, v8
+; GFX9-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX9-NEXT: v_min_i32_e32 v6, 0, v4
+; GFX9-NEXT: v_sub_u32_e32 v6, v13, v6
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v5
+; GFX9-NEXT: v_max_i32_e32 v5, 0, v4
+; GFX9-NEXT: v_sub_u32_e32 v5, v11, v5
+; GFX9-NEXT: v_max_i32_e32 v6, v6, v9
+; GFX9-NEXT: v_min_i32_e32 v5, v6, v5
+; GFX9-NEXT: v_add_u32_e32 v4, v4, v5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_i32_e32 v13, 0, v1
+; GFX10-NEXT: s_mov_b32 s5, 0x80000000
+; GFX10-NEXT: v_min_i32_e32 v10, 0, v0
+; GFX10-NEXT: v_min_i32_e32 v16, 0, v2
+; GFX10-NEXT: v_mov_b32_e32 v15, 0x80000000
+; GFX10-NEXT: v_sub_nc_u32_e32 v13, s5, v13
+; GFX10-NEXT: v_min_i32_e32 v17, 0, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v10, s5, v10
+; GFX10-NEXT: v_sub_nc_u32_e32 v16, s5, v16
+; GFX10-NEXT: v_max_i32_e32 v11, 0, v0
+; GFX10-NEXT: v_max_i32_e32 v23, v13, v6
+; GFX10-NEXT: v_min_i32_e32 v13, 0, v3
+; GFX10-NEXT: v_max_i32_e32 v5, v10, v5
+; GFX10-NEXT: v_bfrev_b32_e32 v12, -2
+; GFX10-NEXT: v_max_i32_e32 v14, 0, v1
+; GFX10-NEXT: v_max_i32_e32 v10, 0, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v13, v15, v13
+; GFX10-NEXT: v_sub_nc_u32_e32 v15, v15, v17
+; GFX10-NEXT: v_max_i32_e32 v18, 0, v3
+; GFX10-NEXT: v_max_i32_e32 v19, 0, v4
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: v_max_i32_e32 v7, v16, v7
+; GFX10-NEXT: v_sub_nc_u32_e32 v11, s4, v11
+; GFX10-NEXT: v_sub_nc_u32_e32 v14, s4, v14
+; GFX10-NEXT: v_sub_nc_u32_e32 v10, s4, v10
+; GFX10-NEXT: v_sub_nc_u32_e32 v16, v12, v18
+; GFX10-NEXT: v_max_i32_e32 v27, v13, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v12, v12, v19
+; GFX10-NEXT: v_max_i32_e32 v9, v15, v9
+; GFX10-NEXT: v_min_i32_e32 v5, v5, v11
+; GFX10-NEXT: v_min_i32_e32 v6, v23, v14
+; GFX10-NEXT: v_min_i32_e32 v7, v7, v10
+; GFX10-NEXT: v_min_i32_e32 v8, v27, v16
+; GFX10-NEXT: v_min_i32_e32 v9, v9, v12
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v5
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v8
+; GFX10-NEXT: v_add_nc_u32_e32 v4, v4, v9
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <5 x i32> @llvm.sadd.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
+
+define amdgpu_ps <5 x i32> @s_saddsat_v5i32(<5 x i32> inreg %lhs, <5 x i32> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s10, -2
+; GFX6-NEXT: s_cselect_b32 s12, s0, 0
+; GFX6-NEXT: s_sub_i32 s12, s10, s12
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s11, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s13, s0, 0
+; GFX6-NEXT: s_sub_i32 s13, s11, s13
+; GFX6-NEXT: s_cmp_gt_i32 s13, s5
+; GFX6-NEXT: s_cselect_b32 s5, s13, s5
+; GFX6-NEXT: s_cmp_lt_i32 s5, s12
+; GFX6-NEXT: s_cselect_b32 s5, s5, s12
+; GFX6-NEXT: s_add_i32 s0, s0, s5
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s5, s1, 0
+; GFX6-NEXT: s_sub_i32 s5, s10, s5
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s12, s1, 0
+; GFX6-NEXT: s_sub_i32 s12, s11, s12
+; GFX6-NEXT: s_cmp_gt_i32 s12, s6
+; GFX6-NEXT: s_cselect_b32 s6, s12, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s6, s5
+; GFX6-NEXT: s_add_i32 s1, s1, s5
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s5, s2, 0
+; GFX6-NEXT: s_sub_i32 s5, s10, s5
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s6, s2, 0
+; GFX6-NEXT: s_sub_i32 s6, s11, s6
+; GFX6-NEXT: s_cmp_gt_i32 s6, s7
+; GFX6-NEXT: s_cselect_b32 s6, s6, s7
+; GFX6-NEXT: s_cmp_lt_i32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s6, s5
+; GFX6-NEXT: s_add_i32 s2, s2, s5
+; GFX6-NEXT: s_cmp_gt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s5, s3, 0
+; GFX6-NEXT: s_sub_i32 s5, s10, s5
+; GFX6-NEXT: s_cmp_lt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s6, s3, 0
+; GFX6-NEXT: s_sub_i32 s6, s11, s6
+; GFX6-NEXT: s_cmp_gt_i32 s6, s8
+; GFX6-NEXT: s_cselect_b32 s6, s6, s8
+; GFX6-NEXT: s_cmp_lt_i32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s6, s5
+; GFX6-NEXT: s_add_i32 s3, s3, s5
+; GFX6-NEXT: s_cmp_gt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s5, s4, 0
+; GFX6-NEXT: s_sub_i32 s5, s10, s5
+; GFX6-NEXT: s_cmp_lt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s6, s4, 0
+; GFX6-NEXT: s_sub_i32 s6, s11, s6
+; GFX6-NEXT: s_cmp_gt_i32 s6, s9
+; GFX6-NEXT: s_cselect_b32 s6, s6, s9
+; GFX6-NEXT: s_cmp_lt_i32 s6, s5
+; GFX6-NEXT: s_cselect_b32 s5, s6, s5
+; GFX6-NEXT: s_add_i32 s4, s4, s5
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, 0
+; GFX8-NEXT: s_brev_b32 s10, -2
+; GFX8-NEXT: s_cselect_b32 s12, s0, 0
+; GFX8-NEXT: s_sub_i32 s12, s10, s12
+; GFX8-NEXT: s_cmp_lt_i32 s0, 0
+; GFX8-NEXT: s_mov_b32 s11, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s13, s0, 0
+; GFX8-NEXT: s_sub_i32 s13, s11, s13
+; GFX8-NEXT: s_cmp_gt_i32 s13, s5
+; GFX8-NEXT: s_cselect_b32 s5, s13, s5
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_add_i32 s0, s0, s5
+; GFX8-NEXT: s_cmp_gt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s5, s1, 0
+; GFX8-NEXT: s_sub_i32 s5, s10, s5
+; GFX8-NEXT: s_cmp_lt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s12, s1, 0
+; GFX8-NEXT: s_sub_i32 s12, s11, s12
+; GFX8-NEXT: s_cmp_gt_i32 s12, s6
+; GFX8-NEXT: s_cselect_b32 s6, s12, s6
+; GFX8-NEXT: s_cmp_lt_i32 s6, s5
+; GFX8-NEXT: s_cselect_b32 s5, s6, s5
+; GFX8-NEXT: s_add_i32 s1, s1, s5
+; GFX8-NEXT: s_cmp_gt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s5, s2, 0
+; GFX8-NEXT: s_sub_i32 s5, s10, s5
+; GFX8-NEXT: s_cmp_lt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s6, s2, 0
+; GFX8-NEXT: s_sub_i32 s6, s11, s6
+; GFX8-NEXT: s_cmp_gt_i32 s6, s7
+; GFX8-NEXT: s_cselect_b32 s6, s6, s7
+; GFX8-NEXT: s_cmp_lt_i32 s6, s5
+; GFX8-NEXT: s_cselect_b32 s5, s6, s5
+; GFX8-NEXT: s_add_i32 s2, s2, s5
+; GFX8-NEXT: s_cmp_gt_i32 s3, 0
+; GFX8-NEXT: s_cselect_b32 s5, s3, 0
+; GFX8-NEXT: s_sub_i32 s5, s10, s5
+; GFX8-NEXT: s_cmp_lt_i32 s3, 0
+; GFX8-NEXT: s_cselect_b32 s6, s3, 0
+; GFX8-NEXT: s_sub_i32 s6, s11, s6
+; GFX8-NEXT: s_cmp_gt_i32 s6, s8
+; GFX8-NEXT: s_cselect_b32 s6, s6, s8
+; GFX8-NEXT: s_cmp_lt_i32 s6, s5
+; GFX8-NEXT: s_cselect_b32 s5, s6, s5
+; GFX8-NEXT: s_add_i32 s3, s3, s5
+; GFX8-NEXT: s_cmp_gt_i32 s4, 0
+; GFX8-NEXT: s_cselect_b32 s5, s4, 0
+; GFX8-NEXT: s_sub_i32 s5, s10, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, 0
+; GFX8-NEXT: s_cselect_b32 s6, s4, 0
+; GFX8-NEXT: s_sub_i32 s6, s11, s6
+; GFX8-NEXT: s_cmp_gt_i32 s6, s9
+; GFX8-NEXT: s_cselect_b32 s6, s6, s9
+; GFX8-NEXT: s_cmp_lt_i32 s6, s5
+; GFX8-NEXT: s_cselect_b32 s5, s6, s5
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_brev_b32 s10, -2
+; GFX9-NEXT: s_cselect_b32 s12, s0, 0
+; GFX9-NEXT: s_sub_i32 s12, s10, s12
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_mov_b32 s11, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s13, s0, 0
+; GFX9-NEXT: s_sub_i32 s13, s11, s13
+; GFX9-NEXT: s_cmp_gt_i32 s13, s5
+; GFX9-NEXT: s_cselect_b32 s5, s13, s5
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_add_i32 s0, s0, s5
+; GFX9-NEXT: s_cmp_gt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s5, s1, 0
+; GFX9-NEXT: s_sub_i32 s5, s10, s5
+; GFX9-NEXT: s_cmp_lt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s12, s1, 0
+; GFX9-NEXT: s_sub_i32 s12, s11, s12
+; GFX9-NEXT: s_cmp_gt_i32 s12, s6
+; GFX9-NEXT: s_cselect_b32 s6, s12, s6
+; GFX9-NEXT: s_cmp_lt_i32 s6, s5
+; GFX9-NEXT: s_cselect_b32 s5, s6, s5
+; GFX9-NEXT: s_add_i32 s1, s1, s5
+; GFX9-NEXT: s_cmp_gt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s5, s2, 0
+; GFX9-NEXT: s_sub_i32 s5, s10, s5
+; GFX9-NEXT: s_cmp_lt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s6, s2, 0
+; GFX9-NEXT: s_sub_i32 s6, s11, s6
+; GFX9-NEXT: s_cmp_gt_i32 s6, s7
+; GFX9-NEXT: s_cselect_b32 s6, s6, s7
+; GFX9-NEXT: s_cmp_lt_i32 s6, s5
+; GFX9-NEXT: s_cselect_b32 s5, s6, s5
+; GFX9-NEXT: s_add_i32 s2, s2, s5
+; GFX9-NEXT: s_cmp_gt_i32 s3, 0
+; GFX9-NEXT: s_cselect_b32 s5, s3, 0
+; GFX9-NEXT: s_sub_i32 s5, s10, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, 0
+; GFX9-NEXT: s_cselect_b32 s6, s3, 0
+; GFX9-NEXT: s_sub_i32 s6, s11, s6
+; GFX9-NEXT: s_cmp_gt_i32 s6, s8
+; GFX9-NEXT: s_cselect_b32 s6, s6, s8
+; GFX9-NEXT: s_cmp_lt_i32 s6, s5
+; GFX9-NEXT: s_cselect_b32 s5, s6, s5
+; GFX9-NEXT: s_add_i32 s3, s3, s5
+; GFX9-NEXT: s_cmp_gt_i32 s4, 0
+; GFX9-NEXT: s_cselect_b32 s5, s4, 0
+; GFX9-NEXT: s_sub_i32 s5, s10, s5
+; GFX9-NEXT: s_cmp_lt_i32 s4, 0
+; GFX9-NEXT: s_cselect_b32 s6, s4, 0
+; GFX9-NEXT: s_sub_i32 s6, s11, s6
+; GFX9-NEXT: s_cmp_gt_i32 s6, s9
+; GFX9-NEXT: s_cselect_b32 s6, s6, s9
+; GFX9-NEXT: s_cmp_lt_i32 s6, s5
+; GFX9-NEXT: s_cselect_b32 s5, s6, s5
+; GFX9-NEXT: s_add_i32 s4, s4, s5
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: s_brev_b32 s10, -2
+; GFX10-NEXT: s_cselect_b32 s11, s0, 0
+; GFX10-NEXT: s_mov_b32 s12, 0x80000000
+; GFX10-NEXT: s_sub_i32 s11, s10, s11
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s13, s0, 0
+; GFX10-NEXT: s_sub_i32 s13, s12, s13
+; GFX10-NEXT: s_cmp_gt_i32 s13, s5
+; GFX10-NEXT: s_cselect_b32 s5, s13, s5
+; GFX10-NEXT: s_cmp_lt_i32 s5, s11
+; GFX10-NEXT: s_cselect_b32 s5, s5, s11
+; GFX10-NEXT: s_add_i32 s0, s0, s5
+; GFX10-NEXT: s_cmp_gt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s5, s1, 0
+; GFX10-NEXT: s_sub_i32 s5, s10, s5
+; GFX10-NEXT: s_cmp_lt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s11, s1, 0
+; GFX10-NEXT: s_sub_i32 s11, s12, s11
+; GFX10-NEXT: s_cmp_gt_i32 s11, s6
+; GFX10-NEXT: s_cselect_b32 s6, s11, s6
+; GFX10-NEXT: s_cmp_lt_i32 s6, s5
+; GFX10-NEXT: s_cselect_b32 s5, s6, s5
+; GFX10-NEXT: s_add_i32 s1, s1, s5
+; GFX10-NEXT: s_cmp_gt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s5, s2, 0
+; GFX10-NEXT: s_sub_i32 s5, s10, s5
+; GFX10-NEXT: s_cmp_lt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s6, s2, 0
+; GFX10-NEXT: s_sub_i32 s6, s12, s6
+; GFX10-NEXT: s_cmp_gt_i32 s6, s7
+; GFX10-NEXT: s_cselect_b32 s6, s6, s7
+; GFX10-NEXT: s_cmp_lt_i32 s6, s5
+; GFX10-NEXT: s_cselect_b32 s5, s6, s5
+; GFX10-NEXT: s_add_i32 s2, s2, s5
+; GFX10-NEXT: s_cmp_gt_i32 s3, 0
+; GFX10-NEXT: s_cselect_b32 s5, s3, 0
+; GFX10-NEXT: s_sub_i32 s5, s10, s5
+; GFX10-NEXT: s_cmp_lt_i32 s3, 0
+; GFX10-NEXT: s_cselect_b32 s6, s3, 0
+; GFX10-NEXT: s_sub_i32 s6, s12, s6
+; GFX10-NEXT: s_cmp_gt_i32 s6, s8
+; GFX10-NEXT: s_cselect_b32 s6, s6, s8
+; GFX10-NEXT: s_cmp_lt_i32 s6, s5
+; GFX10-NEXT: s_cselect_b32 s5, s6, s5
+; GFX10-NEXT: s_add_i32 s3, s3, s5
+; GFX10-NEXT: s_cmp_gt_i32 s4, 0
+; GFX10-NEXT: s_cselect_b32 s5, s4, 0
+; GFX10-NEXT: s_sub_i32 s5, s10, s5
+; GFX10-NEXT: s_cmp_lt_i32 s4, 0
+; GFX10-NEXT: s_cselect_b32 s6, s4, 0
+; GFX10-NEXT: s_sub_i32 s6, s12, s6
+; GFX10-NEXT: s_cmp_gt_i32 s6, s9
+; GFX10-NEXT: s_cselect_b32 s6, s6, s9
+; GFX10-NEXT: s_cmp_lt_i32 s6, s5
+; GFX10-NEXT: s_cselect_b32 s5, s6, s5
+; GFX10-NEXT: s_add_i32 s4, s4, s5
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <5 x i32> @llvm.sadd.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
+
+define <16 x i32> @v_saddsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
+; GFX6-LABEL: v_saddsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_mov_b32 s4, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v32, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, s4, v32
+; GFX6-NEXT: v_max_i32_e32 v16, v32, v16
+; GFX6-NEXT: s_brev_b32 s5, -2
+; GFX6-NEXT: v_max_i32_e32 v32, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v32, vcc, s5, v32
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v32
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v16
+; GFX6-NEXT: v_min_i32_e32 v16, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT: v_max_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v17, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, s5, v17
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v16
+; GFX6-NEXT: v_min_i32_e32 v16, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT: v_max_i32_e32 v17, 0, v2
+; GFX6-NEXT: v_max_i32_e32 v16, v16, v18
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, s5, v17
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v16
+; GFX6-NEXT: v_mov_b32_e32 v16, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v3
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_bfrev_b32_e32 v18, -2
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v3
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v4
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v4
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v20
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v5
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v5
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v21
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v5, vcc, v5, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v6
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v6
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v22
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v6, vcc, v6, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v7
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v7
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v7, vcc, v7, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v8
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v8
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v24
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v8, vcc, v8, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v9
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v9
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v25
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v10
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v10
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v26
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v10, vcc, v10, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v11
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v11
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v27
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v11, vcc, v11, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v12
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v12
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v28
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v12, vcc, v12, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v13
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v13
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v29
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v13, vcc, v13, v17
+; GFX6-NEXT: v_min_i32_e32 v17, 0, v14
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v16, v17
+; GFX6-NEXT: v_max_i32_e32 v19, 0, v14
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v30
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v18, v19
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_add_i32_e32 v14, vcc, v14, v17
+; GFX6-NEXT: v_max_i32_e32 v17, 0, v15
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v18, v17
+; GFX6-NEXT: v_min_i32_e32 v18, 0, v15
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, v16, v18
+; GFX6-NEXT: v_max_i32_e32 v16, v16, v31
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_add_i32_e32 v15, vcc, v15, v16
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v32, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v32, vcc, s4, v32
+; GFX8-NEXT: v_max_i32_e32 v16, v32, v16
+; GFX8-NEXT: s_brev_b32 s5, -2
+; GFX8-NEXT: v_max_i32_e32 v32, 0, v0
+; GFX8-NEXT: v_sub_u32_e32 v32, vcc, s5, v32
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v32
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v16
+; GFX8-NEXT: v_min_i32_e32 v16, 0, v1
+; GFX8-NEXT: v_sub_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_max_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v17, 0, v1
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, s5, v17
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v16
+; GFX8-NEXT: v_min_i32_e32 v16, 0, v2
+; GFX8-NEXT: v_sub_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_max_i32_e32 v17, 0, v2
+; GFX8-NEXT: v_max_i32_e32 v16, v16, v18
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, s5, v17
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v16
+; GFX8-NEXT: v_mov_b32_e32 v16, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v3
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_bfrev_b32_e32 v18, -2
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v3
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v4
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v4
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v20
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v5
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v5
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v21
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v6
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v6
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v22
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v7
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v7
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v8
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v8
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v24
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v9
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v9
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v25
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v10
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v10
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v26
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v11
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v11
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v27
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v12
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v12
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v28
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v13
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v13
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v29
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v17
+; GFX8-NEXT: v_min_i32_e32 v17, 0, v14
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v16, v17
+; GFX8-NEXT: v_max_i32_e32 v19, 0, v14
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v30
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v18, v19
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_add_u32_e32 v14, vcc, v14, v17
+; GFX8-NEXT: v_max_i32_e32 v17, 0, v15
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v18, v17
+; GFX8-NEXT: v_min_i32_e32 v18, 0, v15
+; GFX8-NEXT: v_sub_u32_e32 v16, vcc, v16, v18
+; GFX8-NEXT: v_max_i32_e32 v16, v16, v31
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v32, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v32, s4, v32
+; GFX9-NEXT: v_max_i32_e32 v16, v32, v16
+; GFX9-NEXT: s_brev_b32 s5, -2
+; GFX9-NEXT: v_max_i32_e32 v32, 0, v0
+; GFX9-NEXT: v_sub_u32_e32 v32, s5, v32
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v32
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v16
+; GFX9-NEXT: v_min_i32_e32 v16, 0, v1
+; GFX9-NEXT: v_sub_u32_e32 v16, s4, v16
+; GFX9-NEXT: v_max_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v17, 0, v1
+; GFX9-NEXT: v_sub_u32_e32 v17, s5, v17
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v16
+; GFX9-NEXT: v_min_i32_e32 v16, 0, v2
+; GFX9-NEXT: v_sub_u32_e32 v16, s4, v16
+; GFX9-NEXT: v_max_i32_e32 v17, 0, v2
+; GFX9-NEXT: v_max_i32_e32 v16, v16, v18
+; GFX9-NEXT: v_sub_u32_e32 v17, s5, v17
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v16
+; GFX9-NEXT: v_mov_b32_e32 v16, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v3
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_bfrev_b32_e32 v18, -2
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v3
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v4
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v4
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v20
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v4, v4, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v5
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v5
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v21
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v5, v5, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v6
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v6
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v22
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v6, v6, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v7
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v7
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v7, v7, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v8
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v8
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v24
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v8, v8, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v9
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v9
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v25
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v9, v9, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v10
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v10
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v26
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v10, v10, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v11
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v11
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v27
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v11, v11, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v12
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v12
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v28
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v12, v12, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v13
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v13
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v29
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v13, v13, v17
+; GFX9-NEXT: v_min_i32_e32 v17, 0, v14
+; GFX9-NEXT: v_sub_u32_e32 v17, v16, v17
+; GFX9-NEXT: v_max_i32_e32 v19, 0, v14
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v30
+; GFX9-NEXT: v_sub_u32_e32 v19, v18, v19
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_add_u32_e32 v14, v14, v17
+; GFX9-NEXT: v_max_i32_e32 v17, 0, v15
+; GFX9-NEXT: v_sub_u32_e32 v17, v18, v17
+; GFX9-NEXT: v_min_i32_e32 v18, 0, v15
+; GFX9-NEXT: v_sub_u32_e32 v16, v16, v18
+; GFX9-NEXT: v_max_i32_e32 v16, v16, v31
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_add_u32_e32 v15, v15, v16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_i32_e32 v32, 0, v0
+; GFX10-NEXT: s_mov_b32 s4, 0x80000000
+; GFX10-NEXT: v_max_i32_e32 v33, 0, v0
+; GFX10-NEXT: s_brev_b32 s5, -2
+; GFX10-NEXT: v_min_i32_e32 v36, 0, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v35, s4, v32
+; GFX10-NEXT: v_min_i32_e32 v32, 0, v1
+; GFX10-NEXT: v_sub_nc_u32_e32 v33, s5, v33
+; GFX10-NEXT: v_max_i32_e32 v37, 0, v1
+; GFX10-NEXT: v_sub_nc_u32_e32 v36, s4, v36
+; GFX10-NEXT: v_max_i32_e32 v16, v35, v16
+; GFX10-NEXT: v_sub_nc_u32_e32 v32, s4, v32
+; GFX10-NEXT: v_mov_b32_e32 v35, 0x80000000
+; GFX10-NEXT: v_min_i32_e32 v38, 0, v3
+; GFX10-NEXT: v_max_i32_e32 v18, v36, v18
+; GFX10-NEXT: v_min_i32_e32 v16, v16, v33
+; GFX10-NEXT: v_max_i32_e32 v33, 0, v2
+; GFX10-NEXT: v_max_i32_e32 v39, v32, v17
+; GFX10-NEXT: v_sub_nc_u32_e32 v36, v35, v38
+; GFX10-NEXT: v_sub_nc_u32_e32 v37, s5, v37
+; GFX10-NEXT: v_bfrev_b32_e32 v34, -2
+; GFX10-NEXT: v_sub_nc_u32_e32 v32, s5, v33
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v16
+; GFX10-NEXT: v_max_i32_e32 v33, 0, v3
+; GFX10-NEXT: v_min_i32_e32 v39, v39, v37
+; GFX10-NEXT: v_max_i32_e32 v19, v36, v19
+; GFX10-NEXT: v_min_i32_e32 v16, v18, v32
+; GFX10-NEXT: v_min_i32_e32 v32, 0, v6
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v34, v33
+; GFX10-NEXT: v_min_i32_e32 v38, 0, v5
+; GFX10-NEXT: v_max_i32_e32 v17, 0, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v16
+; GFX10-NEXT: v_min_i32_e32 v16, 0, v4
+; GFX10-NEXT: v_min_i32_e32 v18, v19, v18
+; GFX10-NEXT: v_sub_nc_u32_e32 v19, v35, v38
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v39
+; GFX10-NEXT: v_sub_nc_u32_e32 v32, v35, v32
+; GFX10-NEXT: v_sub_nc_u32_e32 v39, v35, v16
+; GFX10-NEXT: v_max_i32_e32 v33, 0, v5
+; GFX10-NEXT: v_max_i32_e32 v36, 0, v6
+; GFX10-NEXT: v_max_i32_e32 v19, v19, v21
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v18
+; GFX10-NEXT: v_max_i32_e32 v16, v39, v20
+; GFX10-NEXT: v_sub_nc_u32_e32 v17, v34, v17
+; GFX10-NEXT: v_sub_nc_u32_e32 v20, v34, v33
+; GFX10-NEXT: v_sub_nc_u32_e32 v21, v34, v36
+; GFX10-NEXT: v_max_i32_e32 v22, v32, v22
+; GFX10-NEXT: v_min_i32_e32 v18, 0, v7
+; GFX10-NEXT: v_min_i32_e32 v39, v16, v17
+; GFX10-NEXT: v_min_i32_e32 v38, v19, v20
+; GFX10-NEXT: v_max_i32_e32 v16, 0, v7
+; GFX10-NEXT: v_min_i32_e32 v19, v22, v21
+; GFX10-NEXT: v_sub_nc_u32_e32 v17, v35, v18
+; GFX10-NEXT: v_min_i32_e32 v18, 0, v8
+; GFX10-NEXT: v_min_i32_e32 v20, 0, v9
+; GFX10-NEXT: v_sub_nc_u32_e32 v16, v34, v16
+; GFX10-NEXT: v_add_nc_u32_e32 v6, v6, v19
+; GFX10-NEXT: v_max_i32_e32 v19, 0, v8
+; GFX10-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v35, v18
+; GFX10-NEXT: v_min_i32_e32 v22, 0, v10
+; GFX10-NEXT: v_max_i32_e32 v21, 0, v9
+; GFX10-NEXT: v_sub_nc_u32_e32 v20, v35, v20
+; GFX10-NEXT: v_add_nc_u32_e32 v4, v4, v39
+; GFX10-NEXT: v_max_i32_e32 v18, v18, v24
+; GFX10-NEXT: v_sub_nc_u32_e32 v39, v35, v22
+; GFX10-NEXT: v_min_i32_e32 v16, v17, v16
+; GFX10-NEXT: v_sub_nc_u32_e32 v19, v34, v19
+; GFX10-NEXT: v_max_i32_e32 v23, 0, v10
+; GFX10-NEXT: v_max_i32_e32 v20, v20, v25
+; GFX10-NEXT: v_sub_nc_u32_e32 v21, v34, v21
+; GFX10-NEXT: v_add_nc_u32_e32 v7, v7, v16
+; GFX10-NEXT: v_min_i32_e32 v17, v18, v19
+; GFX10-NEXT: v_min_i32_e32 v16, 0, v11
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v34, v23
+; GFX10-NEXT: v_max_i32_e32 v19, v39, v26
+; GFX10-NEXT: v_min_i32_e32 v22, 0, v12
+; GFX10-NEXT: v_min_i32_e32 v20, v20, v21
+; GFX10-NEXT: v_sub_nc_u32_e32 v16, v35, v16
+; GFX10-NEXT: v_min_i32_e32 v26, 0, v15
+; GFX10-NEXT: v_add_nc_u32_e32 v8, v8, v17
+; GFX10-NEXT: v_min_i32_e32 v17, v19, v18
+; GFX10-NEXT: v_sub_nc_u32_e32 v19, v35, v22
+; GFX10-NEXT: v_min_i32_e32 v22, 0, v14
+; GFX10-NEXT: v_min_i32_e32 v21, 0, v13
+; GFX10-NEXT: v_max_i32_e32 v24, 0, v14
+; GFX10-NEXT: v_max_i32_e32 v25, 0, v15
+; GFX10-NEXT: v_add_nc_u32_e32 v9, v9, v20
+; GFX10-NEXT: v_max_i32_e32 v20, 0, v13
+; GFX10-NEXT: v_sub_nc_u32_e32 v39, v35, v22
+; GFX10-NEXT: v_max_i32_e32 v23, 0, v11
+; GFX10-NEXT: v_add_nc_u32_e32 v10, v10, v17
+; GFX10-NEXT: v_max_i32_e32 v17, 0, v12
+; GFX10-NEXT: v_max_i32_e32 v16, v16, v27
+; GFX10-NEXT: v_sub_nc_u32_e32 v27, v35, v21
+; GFX10-NEXT: v_sub_nc_u32_e32 v26, v35, v26
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v34, v23
+; GFX10-NEXT: v_sub_nc_u32_e32 v17, v34, v17
+; GFX10-NEXT: v_max_i32_e32 v19, v19, v28
+; GFX10-NEXT: v_sub_nc_u32_e32 v20, v34, v20
+; GFX10-NEXT: v_max_i32_e32 v21, v27, v29
+; GFX10-NEXT: v_sub_nc_u32_e32 v24, v34, v24
+; GFX10-NEXT: v_max_i32_e32 v22, v39, v30
+; GFX10-NEXT: v_sub_nc_u32_e32 v25, v34, v25
+; GFX10-NEXT: v_max_i32_e32 v23, v26, v31
+; GFX10-NEXT: v_min_i32_e32 v16, v16, v18
+; GFX10-NEXT: v_min_i32_e32 v17, v19, v17
+; GFX10-NEXT: v_min_i32_e32 v18, v21, v20
+; GFX10-NEXT: v_min_i32_e32 v19, v22, v24
+; GFX10-NEXT: v_min_i32_e32 v20, v23, v25
+; GFX10-NEXT: v_add_nc_u32_e32 v5, v5, v38
+; GFX10-NEXT: v_add_nc_u32_e32 v11, v11, v16
+; GFX10-NEXT: v_add_nc_u32_e32 v12, v12, v17
+; GFX10-NEXT: v_add_nc_u32_e32 v13, v13, v18
+; GFX10-NEXT: v_add_nc_u32_e32 v14, v14, v19
+; GFX10-NEXT: v_add_nc_u32_e32 v15, v15, v20
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
+
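+; A minimal IR-level sketch (hypothetical name, not a generated check) of the
+; min/max expansion the i32 checks above exercise: compute the headroom toward
+; INT_MAX and INT_MIN from the LHS, clamp the RHS into that range, then add.
+define i32 @sketch_saddsat_i32_minmax(i32 %lhs, i32 %rhs) {
+  %gt0 = icmp sgt i32 %lhs, 0
+  %maxl = select i1 %gt0, i32 %lhs, i32 0   ; smax(lhs, 0)
+  %hi = sub i32 2147483647, %maxl           ; room left toward INT_MAX
+  %lt0 = icmp slt i32 %lhs, 0
+  %minl = select i1 %lt0, i32 %lhs, i32 0   ; smin(lhs, 0)
+  %lo = sub i32 -2147483648, %minl          ; room left toward INT_MIN
+  %c1 = icmp sgt i32 %lo, %rhs
+  %t = select i1 %c1, i32 %lo, i32 %rhs     ; smax(rhs, lo)
+  %c2 = icmp slt i32 %t, %hi
+  %clamped = select i1 %c2, i32 %t, i32 %hi ; smin(t, hi)
+  %res = add i32 %lhs, %clamped
+  ret i32 %res
+}
+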
+define amdgpu_ps <16 x i32> @s_saddsat_v16i32(<16 x i32> inreg %lhs, <16 x i32> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s32, -2
+; GFX6-NEXT: s_cselect_b32 s34, s0, 0
+; GFX6-NEXT: s_sub_i32 s34, s32, s34
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s33, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s35, s0, 0
+; GFX6-NEXT: s_sub_i32 s35, s33, s35
+; GFX6-NEXT: s_cmp_gt_i32 s35, s16
+; GFX6-NEXT: s_cselect_b32 s16, s35, s16
+; GFX6-NEXT: s_cmp_lt_i32 s16, s34
+; GFX6-NEXT: s_cselect_b32 s16, s16, s34
+; GFX6-NEXT: s_add_i32 s0, s0, s16
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s16, s1, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s34, s1, 0
+; GFX6-NEXT: s_sub_i32 s34, s33, s34
+; GFX6-NEXT: s_cmp_gt_i32 s34, s17
+; GFX6-NEXT: s_cselect_b32 s17, s34, s17
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s1, s1, s16
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s16, s2, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s17, s2, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s18
+; GFX6-NEXT: s_cselect_b32 s17, s17, s18
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s2, s2, s16
+; GFX6-NEXT: s_cmp_gt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s16, s3, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s17, s3, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s19
+; GFX6-NEXT: s_cselect_b32 s17, s17, s19
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s3, s3, s16
+; GFX6-NEXT: s_cmp_gt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s16, s4, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s17, s4, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s20
+; GFX6-NEXT: s_cselect_b32 s17, s17, s20
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s4, s4, s16
+; GFX6-NEXT: s_cmp_gt_i32 s5, 0
+; GFX6-NEXT: s_cselect_b32 s16, s5, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s5, 0
+; GFX6-NEXT: s_cselect_b32 s17, s5, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s21
+; GFX6-NEXT: s_cselect_b32 s17, s17, s21
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s5, s5, s16
+; GFX6-NEXT: s_cmp_gt_i32 s6, 0
+; GFX6-NEXT: s_cselect_b32 s16, s6, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s6, 0
+; GFX6-NEXT: s_cselect_b32 s17, s6, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s22
+; GFX6-NEXT: s_cselect_b32 s17, s17, s22
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s6, s6, s16
+; GFX6-NEXT: s_cmp_gt_i32 s7, 0
+; GFX6-NEXT: s_cselect_b32 s16, s7, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s7, 0
+; GFX6-NEXT: s_cselect_b32 s17, s7, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s23
+; GFX6-NEXT: s_cselect_b32 s17, s17, s23
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s7, s7, s16
+; GFX6-NEXT: s_cmp_gt_i32 s8, 0
+; GFX6-NEXT: s_cselect_b32 s16, s8, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s8, 0
+; GFX6-NEXT: s_cselect_b32 s17, s8, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s24
+; GFX6-NEXT: s_cselect_b32 s17, s17, s24
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s8, s8, s16
+; GFX6-NEXT: s_cmp_gt_i32 s9, 0
+; GFX6-NEXT: s_cselect_b32 s16, s9, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s9, 0
+; GFX6-NEXT: s_cselect_b32 s17, s9, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s25
+; GFX6-NEXT: s_cselect_b32 s17, s17, s25
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_gt_i32 s10, 0
+; GFX6-NEXT: s_cselect_b32 s16, s10, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s10, 0
+; GFX6-NEXT: s_cselect_b32 s17, s10, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s26
+; GFX6-NEXT: s_cselect_b32 s17, s17, s26
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s10, s10, s16
+; GFX6-NEXT: s_cmp_gt_i32 s11, 0
+; GFX6-NEXT: s_cselect_b32 s16, s11, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s11, 0
+; GFX6-NEXT: s_cselect_b32 s17, s11, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s27
+; GFX6-NEXT: s_cselect_b32 s17, s17, s27
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s11, s11, s16
+; GFX6-NEXT: s_cmp_gt_i32 s12, 0
+; GFX6-NEXT: s_cselect_b32 s16, s12, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s12, 0
+; GFX6-NEXT: s_cselect_b32 s17, s12, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s28
+; GFX6-NEXT: s_cselect_b32 s17, s17, s28
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s12, s12, s16
+; GFX6-NEXT: s_cmp_gt_i32 s13, 0
+; GFX6-NEXT: s_cselect_b32 s16, s13, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s13, 0
+; GFX6-NEXT: s_cselect_b32 s17, s13, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s29
+; GFX6-NEXT: s_cselect_b32 s17, s17, s29
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s13, s13, s16
+; GFX6-NEXT: s_cmp_gt_i32 s14, 0
+; GFX6-NEXT: s_cselect_b32 s16, s14, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s14, 0
+; GFX6-NEXT: s_cselect_b32 s17, s14, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s30
+; GFX6-NEXT: s_cselect_b32 s17, s17, s30
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s14, s14, s16
+; GFX6-NEXT: s_cmp_gt_i32 s15, 0
+; GFX6-NEXT: s_cselect_b32 s16, s15, 0
+; GFX6-NEXT: s_sub_i32 s16, s32, s16
+; GFX6-NEXT: s_cmp_lt_i32 s15, 0
+; GFX6-NEXT: s_cselect_b32 s17, s15, 0
+; GFX6-NEXT: s_sub_i32 s17, s33, s17
+; GFX6-NEXT: s_cmp_gt_i32 s17, s31
+; GFX6-NEXT: s_cselect_b32 s17, s17, s31
+; GFX6-NEXT: s_cmp_lt_i32 s17, s16
+; GFX6-NEXT: s_cselect_b32 s16, s17, s16
+; GFX6-NEXT: s_add_i32 s15, s15, s16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, 0
+; GFX8-NEXT: s_brev_b32 s32, -2
+; GFX8-NEXT: s_cselect_b32 s34, s0, 0
+; GFX8-NEXT: s_sub_i32 s34, s32, s34
+; GFX8-NEXT: s_cmp_lt_i32 s0, 0
+; GFX8-NEXT: s_mov_b32 s33, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s35, s0, 0
+; GFX8-NEXT: s_sub_i32 s35, s33, s35
+; GFX8-NEXT: s_cmp_gt_i32 s35, s16
+; GFX8-NEXT: s_cselect_b32 s16, s35, s16
+; GFX8-NEXT: s_cmp_lt_i32 s16, s34
+; GFX8-NEXT: s_cselect_b32 s16, s16, s34
+; GFX8-NEXT: s_add_i32 s0, s0, s16
+; GFX8-NEXT: s_cmp_gt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s16, s1, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s1, 0
+; GFX8-NEXT: s_cselect_b32 s34, s1, 0
+; GFX8-NEXT: s_sub_i32 s34, s33, s34
+; GFX8-NEXT: s_cmp_gt_i32 s34, s17
+; GFX8-NEXT: s_cselect_b32 s17, s34, s17
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s1, s1, s16
+; GFX8-NEXT: s_cmp_gt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s16, s2, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s17, s2, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s18
+; GFX8-NEXT: s_cselect_b32 s17, s17, s18
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s2, s2, s16
+; GFX8-NEXT: s_cmp_gt_i32 s3, 0
+; GFX8-NEXT: s_cselect_b32 s16, s3, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s3, 0
+; GFX8-NEXT: s_cselect_b32 s17, s3, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s19
+; GFX8-NEXT: s_cselect_b32 s17, s17, s19
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s3, s3, s16
+; GFX8-NEXT: s_cmp_gt_i32 s4, 0
+; GFX8-NEXT: s_cselect_b32 s16, s4, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s4, 0
+; GFX8-NEXT: s_cselect_b32 s17, s4, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s20
+; GFX8-NEXT: s_cselect_b32 s17, s17, s20
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s4, s4, s16
+; GFX8-NEXT: s_cmp_gt_i32 s5, 0
+; GFX8-NEXT: s_cselect_b32 s16, s5, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s5, 0
+; GFX8-NEXT: s_cselect_b32 s17, s5, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s21
+; GFX8-NEXT: s_cselect_b32 s17, s17, s21
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s5, s5, s16
+; GFX8-NEXT: s_cmp_gt_i32 s6, 0
+; GFX8-NEXT: s_cselect_b32 s16, s6, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s6, 0
+; GFX8-NEXT: s_cselect_b32 s17, s6, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s22
+; GFX8-NEXT: s_cselect_b32 s17, s17, s22
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s6, s6, s16
+; GFX8-NEXT: s_cmp_gt_i32 s7, 0
+; GFX8-NEXT: s_cselect_b32 s16, s7, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s7, 0
+; GFX8-NEXT: s_cselect_b32 s17, s7, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s23
+; GFX8-NEXT: s_cselect_b32 s17, s17, s23
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s7, s7, s16
+; GFX8-NEXT: s_cmp_gt_i32 s8, 0
+; GFX8-NEXT: s_cselect_b32 s16, s8, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s8, 0
+; GFX8-NEXT: s_cselect_b32 s17, s8, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s24
+; GFX8-NEXT: s_cselect_b32 s17, s17, s24
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s8, s8, s16
+; GFX8-NEXT: s_cmp_gt_i32 s9, 0
+; GFX8-NEXT: s_cselect_b32 s16, s9, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s9, 0
+; GFX8-NEXT: s_cselect_b32 s17, s9, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s25
+; GFX8-NEXT: s_cselect_b32 s17, s17, s25
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s9, s9, s16
+; GFX8-NEXT: s_cmp_gt_i32 s10, 0
+; GFX8-NEXT: s_cselect_b32 s16, s10, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s10, 0
+; GFX8-NEXT: s_cselect_b32 s17, s10, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s26
+; GFX8-NEXT: s_cselect_b32 s17, s17, s26
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s10, s10, s16
+; GFX8-NEXT: s_cmp_gt_i32 s11, 0
+; GFX8-NEXT: s_cselect_b32 s16, s11, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s11, 0
+; GFX8-NEXT: s_cselect_b32 s17, s11, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s27
+; GFX8-NEXT: s_cselect_b32 s17, s17, s27
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s11, s11, s16
+; GFX8-NEXT: s_cmp_gt_i32 s12, 0
+; GFX8-NEXT: s_cselect_b32 s16, s12, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s12, 0
+; GFX8-NEXT: s_cselect_b32 s17, s12, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s28
+; GFX8-NEXT: s_cselect_b32 s17, s17, s28
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s12, s12, s16
+; GFX8-NEXT: s_cmp_gt_i32 s13, 0
+; GFX8-NEXT: s_cselect_b32 s16, s13, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s13, 0
+; GFX8-NEXT: s_cselect_b32 s17, s13, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s29
+; GFX8-NEXT: s_cselect_b32 s17, s17, s29
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s13, s13, s16
+; GFX8-NEXT: s_cmp_gt_i32 s14, 0
+; GFX8-NEXT: s_cselect_b32 s16, s14, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s14, 0
+; GFX8-NEXT: s_cselect_b32 s17, s14, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s30
+; GFX8-NEXT: s_cselect_b32 s17, s17, s30
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s14, s14, s16
+; GFX8-NEXT: s_cmp_gt_i32 s15, 0
+; GFX8-NEXT: s_cselect_b32 s16, s15, 0
+; GFX8-NEXT: s_sub_i32 s16, s32, s16
+; GFX8-NEXT: s_cmp_lt_i32 s15, 0
+; GFX8-NEXT: s_cselect_b32 s17, s15, 0
+; GFX8-NEXT: s_sub_i32 s17, s33, s17
+; GFX8-NEXT: s_cmp_gt_i32 s17, s31
+; GFX8-NEXT: s_cselect_b32 s17, s17, s31
+; GFX8-NEXT: s_cmp_lt_i32 s17, s16
+; GFX8-NEXT: s_cselect_b32 s16, s17, s16
+; GFX8-NEXT: s_add_i32 s15, s15, s16
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, 0
+; GFX9-NEXT: s_brev_b32 s32, -2
+; GFX9-NEXT: s_cselect_b32 s34, s0, 0
+; GFX9-NEXT: s_sub_i32 s34, s32, s34
+; GFX9-NEXT: s_cmp_lt_i32 s0, 0
+; GFX9-NEXT: s_mov_b32 s33, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s35, s0, 0
+; GFX9-NEXT: s_sub_i32 s35, s33, s35
+; GFX9-NEXT: s_cmp_gt_i32 s35, s16
+; GFX9-NEXT: s_cselect_b32 s16, s35, s16
+; GFX9-NEXT: s_cmp_lt_i32 s16, s34
+; GFX9-NEXT: s_cselect_b32 s16, s16, s34
+; GFX9-NEXT: s_add_i32 s0, s0, s16
+; GFX9-NEXT: s_cmp_gt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s16, s1, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s1, 0
+; GFX9-NEXT: s_cselect_b32 s34, s1, 0
+; GFX9-NEXT: s_sub_i32 s34, s33, s34
+; GFX9-NEXT: s_cmp_gt_i32 s34, s17
+; GFX9-NEXT: s_cselect_b32 s17, s34, s17
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s1, s1, s16
+; GFX9-NEXT: s_cmp_gt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s16, s2, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s17, s2, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s18
+; GFX9-NEXT: s_cselect_b32 s17, s17, s18
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s2, s2, s16
+; GFX9-NEXT: s_cmp_gt_i32 s3, 0
+; GFX9-NEXT: s_cselect_b32 s16, s3, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s3, 0
+; GFX9-NEXT: s_cselect_b32 s17, s3, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s19
+; GFX9-NEXT: s_cselect_b32 s17, s17, s19
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s3, s3, s16
+; GFX9-NEXT: s_cmp_gt_i32 s4, 0
+; GFX9-NEXT: s_cselect_b32 s16, s4, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s4, 0
+; GFX9-NEXT: s_cselect_b32 s17, s4, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s20
+; GFX9-NEXT: s_cselect_b32 s17, s17, s20
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s4, s4, s16
+; GFX9-NEXT: s_cmp_gt_i32 s5, 0
+; GFX9-NEXT: s_cselect_b32 s16, s5, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s5, 0
+; GFX9-NEXT: s_cselect_b32 s17, s5, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s21
+; GFX9-NEXT: s_cselect_b32 s17, s17, s21
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s5, s5, s16
+; GFX9-NEXT: s_cmp_gt_i32 s6, 0
+; GFX9-NEXT: s_cselect_b32 s16, s6, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s6, 0
+; GFX9-NEXT: s_cselect_b32 s17, s6, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s22
+; GFX9-NEXT: s_cselect_b32 s17, s17, s22
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s6, s6, s16
+; GFX9-NEXT: s_cmp_gt_i32 s7, 0
+; GFX9-NEXT: s_cselect_b32 s16, s7, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s7, 0
+; GFX9-NEXT: s_cselect_b32 s17, s7, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s23
+; GFX9-NEXT: s_cselect_b32 s17, s17, s23
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s7, s7, s16
+; GFX9-NEXT: s_cmp_gt_i32 s8, 0
+; GFX9-NEXT: s_cselect_b32 s16, s8, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s8, 0
+; GFX9-NEXT: s_cselect_b32 s17, s8, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s24
+; GFX9-NEXT: s_cselect_b32 s17, s17, s24
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s8, s8, s16
+; GFX9-NEXT: s_cmp_gt_i32 s9, 0
+; GFX9-NEXT: s_cselect_b32 s16, s9, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s9, 0
+; GFX9-NEXT: s_cselect_b32 s17, s9, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s25
+; GFX9-NEXT: s_cselect_b32 s17, s17, s25
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s9, s9, s16
+; GFX9-NEXT: s_cmp_gt_i32 s10, 0
+; GFX9-NEXT: s_cselect_b32 s16, s10, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s10, 0
+; GFX9-NEXT: s_cselect_b32 s17, s10, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s26
+; GFX9-NEXT: s_cselect_b32 s17, s17, s26
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s10, s10, s16
+; GFX9-NEXT: s_cmp_gt_i32 s11, 0
+; GFX9-NEXT: s_cselect_b32 s16, s11, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s11, 0
+; GFX9-NEXT: s_cselect_b32 s17, s11, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s27
+; GFX9-NEXT: s_cselect_b32 s17, s17, s27
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s11, s11, s16
+; GFX9-NEXT: s_cmp_gt_i32 s12, 0
+; GFX9-NEXT: s_cselect_b32 s16, s12, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s12, 0
+; GFX9-NEXT: s_cselect_b32 s17, s12, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s28
+; GFX9-NEXT: s_cselect_b32 s17, s17, s28
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s12, s12, s16
+; GFX9-NEXT: s_cmp_gt_i32 s13, 0
+; GFX9-NEXT: s_cselect_b32 s16, s13, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s13, 0
+; GFX9-NEXT: s_cselect_b32 s17, s13, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s29
+; GFX9-NEXT: s_cselect_b32 s17, s17, s29
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s13, s13, s16
+; GFX9-NEXT: s_cmp_gt_i32 s14, 0
+; GFX9-NEXT: s_cselect_b32 s16, s14, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s14, 0
+; GFX9-NEXT: s_cselect_b32 s17, s14, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s30
+; GFX9-NEXT: s_cselect_b32 s17, s17, s30
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s14, s14, s16
+; GFX9-NEXT: s_cmp_gt_i32 s15, 0
+; GFX9-NEXT: s_cselect_b32 s16, s15, 0
+; GFX9-NEXT: s_sub_i32 s16, s32, s16
+; GFX9-NEXT: s_cmp_lt_i32 s15, 0
+; GFX9-NEXT: s_cselect_b32 s17, s15, 0
+; GFX9-NEXT: s_sub_i32 s17, s33, s17
+; GFX9-NEXT: s_cmp_gt_i32 s17, s31
+; GFX9-NEXT: s_cselect_b32 s17, s17, s31
+; GFX9-NEXT: s_cmp_lt_i32 s17, s16
+; GFX9-NEXT: s_cselect_b32 s16, s17, s16
+; GFX9-NEXT: s_add_i32 s15, s15, s16
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, 0
+; GFX10-NEXT: s_brev_b32 s32, -2
+; GFX10-NEXT: s_cselect_b32 s33, s0, 0
+; GFX10-NEXT: s_mov_b32 s34, 0x80000000
+; GFX10-NEXT: s_sub_i32 s46, s32, s33
+; GFX10-NEXT: s_cmp_lt_i32 s0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s35, s0, 0
+; GFX10-NEXT: s_sub_i32 s35, s34, s35
+; GFX10-NEXT: s_cmp_gt_i32 s35, s16
+; GFX10-NEXT: s_cselect_b32 s16, s35, s16
+; GFX10-NEXT: s_cmp_lt_i32 s16, s46
+; GFX10-NEXT: s_cselect_b32 s46, s16, s46
+; GFX10-NEXT: s_add_i32 s0, s0, s46
+; GFX10-NEXT: s_cmp_gt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s46, s1, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s1, 0
+; GFX10-NEXT: s_cselect_b32 s33, s1, 0
+; GFX10-NEXT: s_sub_i32 s46, s34, s33
+; GFX10-NEXT: s_cmp_gt_i32 s46, s17
+; GFX10-NEXT: s_cselect_b32 s17, s46, s17
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s46, s17, s16
+; GFX10-NEXT: s_add_i32 s1, s1, s46
+; GFX10-NEXT: s_cmp_gt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s46, s2, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s2, 0
+; GFX10-NEXT: s_cselect_b32 s17, s2, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s18
+; GFX10-NEXT: s_cselect_b32 s17, s17, s18
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s2, s2, s16
+; GFX10-NEXT: s_cmp_gt_i32 s3, 0
+; GFX10-NEXT: s_cselect_b32 s46, s3, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s3, 0
+; GFX10-NEXT: s_cselect_b32 s17, s3, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s19
+; GFX10-NEXT: s_cselect_b32 s17, s17, s19
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s3, s3, s16
+; GFX10-NEXT: s_cmp_gt_i32 s4, 0
+; GFX10-NEXT: s_cselect_b32 s46, s4, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s4, 0
+; GFX10-NEXT: s_cselect_b32 s17, s4, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s20
+; GFX10-NEXT: s_cselect_b32 s17, s17, s20
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s4, s4, s16
+; GFX10-NEXT: s_cmp_gt_i32 s5, 0
+; GFX10-NEXT: s_cselect_b32 s46, s5, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s5, 0
+; GFX10-NEXT: s_cselect_b32 s17, s5, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s21
+; GFX10-NEXT: s_cselect_b32 s17, s17, s21
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s5, s5, s16
+; GFX10-NEXT: s_cmp_gt_i32 s6, 0
+; GFX10-NEXT: s_cselect_b32 s46, s6, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s6, 0
+; GFX10-NEXT: s_cselect_b32 s17, s6, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s22
+; GFX10-NEXT: s_cselect_b32 s17, s17, s22
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s6, s6, s16
+; GFX10-NEXT: s_cmp_gt_i32 s7, 0
+; GFX10-NEXT: s_cselect_b32 s46, s7, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s7, 0
+; GFX10-NEXT: s_cselect_b32 s17, s7, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s23
+; GFX10-NEXT: s_cselect_b32 s17, s17, s23
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s7, s7, s16
+; GFX10-NEXT: s_cmp_gt_i32 s8, 0
+; GFX10-NEXT: s_cselect_b32 s46, s8, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s8, 0
+; GFX10-NEXT: s_cselect_b32 s17, s8, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s24
+; GFX10-NEXT: s_cselect_b32 s17, s17, s24
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s8, s8, s16
+; GFX10-NEXT: s_cmp_gt_i32 s9, 0
+; GFX10-NEXT: s_cselect_b32 s46, s9, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s9, 0
+; GFX10-NEXT: s_cselect_b32 s17, s9, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s25
+; GFX10-NEXT: s_cselect_b32 s17, s17, s25
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s9, s9, s16
+; GFX10-NEXT: s_cmp_gt_i32 s10, 0
+; GFX10-NEXT: s_cselect_b32 s46, s10, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s10, 0
+; GFX10-NEXT: s_cselect_b32 s17, s10, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s26
+; GFX10-NEXT: s_cselect_b32 s17, s17, s26
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s10, s10, s16
+; GFX10-NEXT: s_cmp_gt_i32 s11, 0
+; GFX10-NEXT: s_cselect_b32 s46, s11, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s11, 0
+; GFX10-NEXT: s_cselect_b32 s17, s11, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s27
+; GFX10-NEXT: s_cselect_b32 s17, s17, s27
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s11, s11, s16
+; GFX10-NEXT: s_cmp_gt_i32 s12, 0
+; GFX10-NEXT: s_cselect_b32 s46, s12, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s12, 0
+; GFX10-NEXT: s_cselect_b32 s17, s12, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s28
+; GFX10-NEXT: s_cselect_b32 s17, s17, s28
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s12, s12, s16
+; GFX10-NEXT: s_cmp_gt_i32 s13, 0
+; GFX10-NEXT: s_cselect_b32 s46, s13, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s13, 0
+; GFX10-NEXT: s_cselect_b32 s17, s13, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s29
+; GFX10-NEXT: s_cselect_b32 s17, s17, s29
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s13, s13, s16
+; GFX10-NEXT: s_cmp_gt_i32 s14, 0
+; GFX10-NEXT: s_cselect_b32 s46, s14, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s46
+; GFX10-NEXT: s_cmp_lt_i32 s14, 0
+; GFX10-NEXT: s_cselect_b32 s17, s14, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s30
+; GFX10-NEXT: s_cselect_b32 s17, s17, s30
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s14, s14, s16
+; GFX10-NEXT: s_cmp_gt_i32 s15, 0
+; GFX10-NEXT: s_cselect_b32 s30, s15, 0
+; GFX10-NEXT: s_sub_i32 s16, s32, s30
+; GFX10-NEXT: s_cmp_lt_i32 s15, 0
+; GFX10-NEXT: s_cselect_b32 s17, s15, 0
+; GFX10-NEXT: s_sub_i32 s17, s34, s17
+; GFX10-NEXT: s_cmp_gt_i32 s17, s31
+; GFX10-NEXT: s_cselect_b32 s17, s17, s31
+; GFX10-NEXT: s_cmp_lt_i32 s17, s16
+; GFX10-NEXT: s_cselect_b32 s16, s17, s16
+; GFX10-NEXT: s_add_i32 s15, s15, s16
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
+
+define i16 @v_saddsat_i16(i16 %lhs, i16 %rhs) {
+; GFX6-LABEL: v_saddsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_max_i32_e32 v1, v3, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v0
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v3, 0x8000, v3
+; GFX8-NEXT: v_sub_u16_e32 v2, 0x7fff, v2
+; GFX8-NEXT: v_max_i16_e32 v1, v3, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_i16_e32 v3, 0, v0
+; GFX9-NEXT: v_max_i16_e32 v2, 0, v0
+; GFX9-NEXT: v_sub_u16_e32 v3, 0x8000, v3
+; GFX9-NEXT: v_sub_u16_e32 v2, 0x7fff, v2
+; GFX9-NEXT: v_max_i16_e32 v1, v3, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_i16_e64 v2, v0, 0
+; GFX10-NEXT: v_max_i16_e64 v3, v0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, 0x8000, v2
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, 0x7fff, v3
+; GFX10-NEXT: v_max_i16_e64 v1, v2, v1
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v3
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i16 @llvm.sadd.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
+
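+; A rough sketch (hypothetical name) of the widening trick the GFX6 checks
+; above rely on: with no 16-bit ALU, the operands are shifted into the high
+; half of a 32-bit register, where i32 saturation coincides with i16
+; saturation, and the result is shifted back with a sign-extending shift.
+define i16 @sketch_saddsat_i16_via_i32(i16 %lhs, i16 %rhs) {
+  %l0 = zext i16 %lhs to i32
+  %r0 = zext i16 %rhs to i32
+  %l = shl i32 %l0, 16                      ; place value in bits [31:16]
+  %r = shl i32 %r0, 16
+  %s = call i32 @llvm.sadd.sat.i32(i32 %l, i32 %r)
+  %sh = ashr i32 %s, 16                     ; arithmetic shift back down
+  %res = trunc i32 %sh to i16
+  ret i16 %res
+}
+declare i32 @llvm.sadd.sat.i32(i32, i32)
+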
+define amdgpu_ps i16 @s_saddsat_i16(i16 inreg %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: s_saddsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: s_sub_i32 s2, 0x7fffffff, s2
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s3, s0, 0
+; GFX6-NEXT: s_sub_i32 s3, 0x80000000, s3
+; GFX6-NEXT: s_cmp_gt_i32 s3, s1
+; GFX6-NEXT: s_cselect_b32 s1, s3, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s1, s1, s2
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sext_i32_i16 s2, s0
+; GFX8-NEXT: s_sext_i32_i16 s3, 0
+; GFX8-NEXT: s_cmp_gt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s4, s2, s3
+; GFX8-NEXT: s_sub_i32 s4, 0x7fff, s4
+; GFX8-NEXT: s_cmp_lt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_sub_i32 s2, 0x8000, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s2, s1
+; GFX8-NEXT: s_cselect_b32 s1, s2, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s2, s4
+; GFX8-NEXT: s_cmp_lt_i32 s1, s2
+; GFX8-NEXT: s_cselect_b32 s1, s1, s2
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sext_i32_i16 s2, s0
+; GFX9-NEXT: s_sext_i32_i16 s3, 0
+; GFX9-NEXT: s_cmp_gt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s4, s2, s3
+; GFX9-NEXT: s_sub_i32 s4, 0x7fff, s4
+; GFX9-NEXT: s_cmp_lt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_sub_i32 s2, 0x8000, s2
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s2, s4
+; GFX9-NEXT: s_cmp_lt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s1, s1, s2
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sext_i32_i16 s2, 0
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_gt_i32 s3, s2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s4, s3, s2
+; GFX10-NEXT: s_sub_i32 s4, 0x7fff, s4
+; GFX10-NEXT: s_cmp_lt_i32 s3, s2
+; GFX10-NEXT: s_cselect_b32 s2, s3, s2
+; GFX10-NEXT: s_sub_i32 s2, 0x8000, s2
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cmp_gt_i32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_sext_i32_i16 s2, s4
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.sadd.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
+
+define amdgpu_ps half @saddsat_i16_sv(i16 inreg %lhs, i16 %rhs) {
+; GFX6-LABEL: saddsat_i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s1, s0, 0
+; GFX6-NEXT: s_sub_i32 s1, 0x7fffffff, s1
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_sub_i32 s2, 0x80000000, s2
+; GFX6-NEXT: v_max_i32_e32 v0, s2, v0
+; GFX6-NEXT: v_min_i32_e32 v0, s1, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sext_i32_i16 s1, s0
+; GFX8-NEXT: s_sext_i32_i16 s2, 0
+; GFX8-NEXT: s_cmp_gt_i32 s1, s2
+; GFX8-NEXT: s_cselect_b32 s3, s1, s2
+; GFX8-NEXT: s_sub_i32 s3, 0x7fff, s3
+; GFX8-NEXT: s_cmp_lt_i32 s1, s2
+; GFX8-NEXT: s_cselect_b32 s1, s1, s2
+; GFX8-NEXT: s_sub_i32 s1, 0x8000, s1
+; GFX8-NEXT: v_max_i16_e32 v0, s1, v0
+; GFX8-NEXT: v_min_i16_e32 v0, s3, v0
+; GFX8-NEXT: v_add_u16_e32 v0, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sext_i32_i16 s1, s0
+; GFX9-NEXT: s_sext_i32_i16 s2, 0
+; GFX9-NEXT: s_cmp_gt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s3, s1, s2
+; GFX9-NEXT: s_sub_i32 s3, 0x7fff, s3
+; GFX9-NEXT: s_cmp_lt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s1, s1, s2
+; GFX9-NEXT: s_sub_i32 s1, 0x8000, s1
+; GFX9-NEXT: v_max_i16_e32 v0, s1, v0
+; GFX9-NEXT: v_min_i16_e32 v0, s3, v0
+; GFX9-NEXT: v_add_u16_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sext_i32_i16 s1, s0
+; GFX10-NEXT: s_sext_i32_i16 s2, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_gt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s3, s1, s2
+; GFX10-NEXT: s_sub_i32 s3, 0x7fff, s3
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_sub_i32 s1, 0x8000, s1
+; GFX10-NEXT: v_max_i16_e64 v0, s1, v0
+; GFX10-NEXT: v_min_i16_e64 v0, v0, s3
+; GFX10-NEXT: v_add_nc_u16_e64 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.sadd.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define amdgpu_ps half @saddsat_i16_vs(i16 %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: saddsat_i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_min_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v1, 0, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, 0x80000000, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, 0x7fffffff, v1
+; GFX6-NEXT: v_max_i32_e32 v2, s0, v2
+; GFX6-NEXT: v_min_i32_e32 v1, v2, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_min_i16_e32 v2, 0, v0
+; GFX8-NEXT: v_max_i16_e32 v1, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v2, 0x8000, v2
+; GFX8-NEXT: v_sub_u16_e32 v1, 0x7fff, v1
+; GFX8-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX8-NEXT: v_min_i16_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_i16_e32 v2, 0, v0
+; GFX9-NEXT: v_max_i16_e32 v1, 0, v0
+; GFX9-NEXT: v_sub_u16_e32 v2, 0x8000, v2
+; GFX9-NEXT: v_sub_u16_e32 v1, 0x7fff, v1
+; GFX9-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX9-NEXT: v_min_i16_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_min_i16_e64 v1, v0, 0
+; GFX10-NEXT: v_max_i16_e64 v2, v0, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u16_e64 v1, 0x8000, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, 0x7fff, v2
+; GFX10-NEXT: v_max_i16_e64 v1, v1, s0
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v2
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.sadd.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define <2 x i16> @v_saddsat_v2i16(<2 x i16> %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: v_saddsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v5, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v4, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: v_max_i32_e32 v2, v5, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX6-NEXT: v_min_i32_e32 v4, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX6-NEXT: v_max_i32_e32 v3, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_max_i32_e32 v2, v4, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v3
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v4, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v4, s5, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v3, 0, v0
+; GFX8-NEXT: v_min_i16_e32 v5, 0, v2
+; GFX8-NEXT: v_sub_u16_e32 v3, s4, v3
+; GFX8-NEXT: v_max_i16_e32 v4, v4, v1
+; GFX8-NEXT: v_min_i16_e32 v3, v4, v3
+; GFX8-NEXT: v_max_i16_e32 v4, 0, v2
+; GFX8-NEXT: v_sub_u16_e32 v5, s5, v5
+; GFX8-NEXT: v_sub_u16_e32 v4, s4, v4
+; GFX8-NEXT: v_max_i16_sdwa v1, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v4
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v3
+; GFX8-NEXT: v_add_u16_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, 0, 0
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v3, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v3, s5, v3
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v2, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v2, s4, v2
+; GFX9-NEXT: v_pk_max_i16 v1, v3, v1
+; GFX9-NEXT: v_pk_min_i16 v1, v1, v2
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, 0, 0
+; GFX10-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX10-NEXT: v_pk_min_i16 v2, v0, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX10-NEXT: v_pk_max_i16 v3, v0, s4
+; GFX10-NEXT: s_movk_i32 s6, 0x7fff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v2, s5, v2
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s6, s6
+; GFX10-NEXT: v_pk_sub_i16 v3, s4, v3
+; GFX10-NEXT: v_pk_max_i16 v1, v2, v1
+; GFX10-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ ret <2 x i16> %result
+}
+
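+; A minimal vector-IR sketch (hypothetical name) of the same clamp applied
+; per lane; on GFX9/GFX10 the checks above map each step onto one packed
+; v_pk_* instruction, with the 0x7fff/0x8000 bounds splatted into both halves.
+define <2 x i16> @sketch_saddsat_v2i16_minmax(<2 x i16> %lhs, <2 x i16> %rhs) {
+  %gt0 = icmp sgt <2 x i16> %lhs, zeroinitializer
+  %maxl = select <2 x i1> %gt0, <2 x i16> %lhs, <2 x i16> zeroinitializer
+  %hi = sub <2 x i16> <i16 32767, i16 32767>, %maxl
+  %lt0 = icmp slt <2 x i16> %lhs, zeroinitializer
+  %minl = select <2 x i1> %lt0, <2 x i16> %lhs, <2 x i16> zeroinitializer
+  %lo = sub <2 x i16> <i16 -32768, i16 -32768>, %minl
+  %c1 = icmp sgt <2 x i16> %lo, %rhs
+  %t = select <2 x i1> %c1, <2 x i16> %lo, <2 x i16> %rhs
+  %c2 = icmp slt <2 x i16> %t, %hi
+  %clamped = select <2 x i1> %c2, <2 x i16> %t, <2 x i16> %hi
+  %res = add <2 x i16> %lhs, %clamped
+  ret <2 x i16> %res
+}
+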
+define amdgpu_ps i32 @s_saddsat_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: s_cselect_b32 s6, s0, 0
+; GFX6-NEXT: s_sub_i32 s6, s4, s6
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s7, s0, 0
+; GFX6-NEXT: s_sub_i32 s7, s5, s7
+; GFX6-NEXT: s_cmp_gt_i32 s7, s2
+; GFX6-NEXT: s_cselect_b32 s2, s7, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s6
+; GFX6-NEXT: s_cselect_b32 s2, s2, s6
+; GFX6-NEXT: s_add_i32 s0, s0, s2
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s3, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s3, s1, 0
+; GFX6-NEXT: s_sub_i32 s3, s4, s3
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s4, s1, 0
+; GFX6-NEXT: s_sub_i32 s4, s5, s4
+; GFX6-NEXT: s_cmp_gt_i32 s4, s2
+; GFX6-NEXT: s_cselect_b32 s2, s4, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s3
+; GFX6-NEXT: s_cselect_b32 s2, s2, s3
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: s_mov_b32 s2, 0xffff
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s3, s1, 16
+; GFX8-NEXT: s_lshr_b32 s2, s0, 16
+; GFX8-NEXT: s_sext_i32_i16 s6, s0
+; GFX8-NEXT: s_sext_i32_i16 s7, 0
+; GFX8-NEXT: s_cmp_gt_i32 s6, s7
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s8, s6, s7
+; GFX8-NEXT: s_sub_i32 s8, s4, s8
+; GFX8-NEXT: s_cmp_lt_i32 s6, s7
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: s_cselect_b32 s6, s6, s7
+; GFX8-NEXT: s_sub_i32 s6, s5, s6
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s6, s1
+; GFX8-NEXT: s_cselect_b32 s1, s6, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s6, s8
+; GFX8-NEXT: s_cmp_lt_i32 s1, s6
+; GFX8-NEXT: s_cselect_b32 s1, s1, s6
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s2
+; GFX8-NEXT: s_cmp_gt_i32 s1, s7
+; GFX8-NEXT: s_cselect_b32 s6, s1, s7
+; GFX8-NEXT: s_sub_i32 s4, s4, s6
+; GFX8-NEXT: s_cmp_lt_i32 s1, s7
+; GFX8-NEXT: s_cselect_b32 s1, s1, s7
+; GFX8-NEXT: s_sub_i32 s1, s5, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s3, s4
+; GFX8-NEXT: s_cmp_lt_i32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_add_i32 s2, s2, s1
+; GFX8-NEXT: s_bfe_u32 s1, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, 0, 0
+; GFX9-NEXT: s_sext_i32_i16 s7, s4
+; GFX9-NEXT: s_sext_i32_i16 s5, s0
+; GFX9-NEXT: s_ashr_i32 s6, s0, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s8, s5, s7
+; GFX9-NEXT: s_cmp_gt_i32 s6, s4
+; GFX9-NEXT: s_movk_i32 s2, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s9, s6, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX9-NEXT: s_lshr_b32 s9, s2, 16
+; GFX9-NEXT: s_lshr_b32 s10, s8, 16
+; GFX9-NEXT: s_sub_i32 s2, s2, s8
+; GFX9-NEXT: s_sub_i32 s8, s9, s10
+; GFX9-NEXT: s_cmp_lt_i32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_i32 s6, s4
+; GFX9-NEXT: s_mov_b32 s3, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s4, s6, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s3
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_sub_i32 s3, s3, s4
+; GFX9-NEXT: s_sub_i32 s4, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_sext_i32_i16 s5, s1
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s1, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_gt_i32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s4, s1
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX9-NEXT: s_sext_i32_i16 s3, s1
+; GFX9-NEXT: s_sext_i32_i16 s4, s2
+; GFX9-NEXT: s_ashr_i32 s1, s1, 16
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_cmp_lt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_cmp_lt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s1, s1, s2
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s3, s1
+; GFX9-NEXT: s_lshr_b32 s2, s0, 16
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, 0, 0
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s5, s2
+; GFX10-NEXT: s_ashr_i32 s4, s0, 16
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s3, s5
+; GFX10-NEXT: s_movk_i32 s7, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s6, s3, s5
+; GFX10-NEXT: s_cmp_gt_i32 s4, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s7, s7
+; GFX10-NEXT: s_cselect_b32 s8, s4, s2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX10-NEXT: s_lshr_b32 s8, s7, 16
+; GFX10-NEXT: s_lshr_b32 s9, s6, 16
+; GFX10-NEXT: s_sub_i32 s6, s7, s6
+; GFX10-NEXT: s_sub_i32 s7, s8, s9
+; GFX10-NEXT: s_cmp_lt_i32 s3, s5
+; GFX10-NEXT: s_cselect_b32 s3, s3, s5
+; GFX10-NEXT: s_cmp_lt_i32 s4, s2
+; GFX10-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX10-NEXT: s_cselect_b32 s2, s4, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s5, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX10-NEXT: s_lshr_b32 s3, s4, 16
+; GFX10-NEXT: s_lshr_b32 s5, s2, 16
+; GFX10-NEXT: s_sub_i32 s2, s4, s2
+; GFX10-NEXT: s_sub_i32 s3, s3, s5
+; GFX10-NEXT: s_sext_i32_i16 s4, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX10-NEXT: s_ashr_i32 s1, s1, 16
+; GFX10-NEXT: s_sext_i32_i16 s3, s2
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_cmp_gt_i32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s6, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s3, s1
+; GFX10-NEXT: s_sext_i32_i16 s4, s2
+; GFX10-NEXT: s_sext_i32_i16 s3, s1
+; GFX10-NEXT: s_ashr_i32 s1, s1, 16
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_lshr_b32 s2, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s3, s1
+; GFX10-NEXT: s_lshr_b32 s3, s1, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_add_i32 s2, s2, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to i32
+ ret i32 %cast
+}
+
+define amdgpu_ps float @saddsat_v2i16_sv(<2 x i16> inreg %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: saddsat_v2i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s2, -2
+; GFX6-NEXT: s_cselect_b32 s4, s0, 0
+; GFX6-NEXT: s_sub_i32 s4, s2, s4
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s3, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s5, s0, 0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_sub_i32 s5, s3, s5
+; GFX6-NEXT: v_max_i32_e32 v0, s5, v0
+; GFX6-NEXT: v_min_i32_e32 v0, s4, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s1, s0, 0
+; GFX6-NEXT: s_sub_i32 s1, s2, s1
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s2, s0, 0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_sub_i32 s2, s3, s2
+; GFX6-NEXT: v_max_i32_e32 v1, s2, v1
+; GFX6-NEXT: v_min_i32_e32 v1, s1, v1
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, s0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_v2i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: s_sext_i32_i16 s4, s0
+; GFX8-NEXT: s_sext_i32_i16 s5, 0
+; GFX8-NEXT: s_cmp_gt_i32 s4, s5
+; GFX8-NEXT: s_movk_i32 s2, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s6, s4, s5
+; GFX8-NEXT: s_sub_i32 s6, s2, s6
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0x8000
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_sub_i32 s4, s3, s4
+; GFX8-NEXT: v_max_i16_e32 v1, s4, v0
+; GFX8-NEXT: s_sext_i32_i16 s4, s1
+; GFX8-NEXT: s_cmp_gt_i32 s4, s5
+; GFX8-NEXT: v_min_i16_e32 v1, s6, v1
+; GFX8-NEXT: s_cselect_b32 s6, s4, s5
+; GFX8-NEXT: s_sub_i32 s2, s2, s6
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_sub_i32 s3, s3, s4
+; GFX8-NEXT: v_mov_b32_e32 v2, s3
+; GFX8-NEXT: v_max_i16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v0, s2, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_add_u16_e32 v1, s0, v1
+; GFX8-NEXT: v_add_u16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_v2i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, 0, 0
+; GFX9-NEXT: s_sext_i32_i16 s6, s3
+; GFX9-NEXT: s_sext_i32_i16 s4, s0
+; GFX9-NEXT: s_ashr_i32 s5, s0, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s7, s4, s6
+; GFX9-NEXT: s_cmp_gt_i32 s5, s3
+; GFX9-NEXT: s_movk_i32 s1, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s8, s5, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s1
+; GFX9-NEXT: s_lshr_b32 s8, s1, 16
+; GFX9-NEXT: s_lshr_b32 s9, s7, 16
+; GFX9-NEXT: s_sub_i32 s1, s1, s7
+; GFX9-NEXT: s_sub_i32 s7, s8, s9
+; GFX9-NEXT: s_cmp_lt_i32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_cmp_lt_i32 s5, s3
+; GFX9-NEXT: s_mov_b32 s2, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s3, s5, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: s_sub_i32 s3, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7
+; GFX9-NEXT: v_pk_max_i16 v0, s2, v0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, s1
+; GFX9-NEXT: v_pk_add_u16 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_v2i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, 0, 0
+; GFX10-NEXT: s_sext_i32_i16 s2, s0
+; GFX10-NEXT: s_sext_i32_i16 s4, s1
+; GFX10-NEXT: s_ashr_i32 s3, s0, 16
+; GFX10-NEXT: s_ashr_i32 s1, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s2, s4
+; GFX10-NEXT: s_movk_i32 s6, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s5, s2, s4
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX10-NEXT: s_cselect_b32 s7, s3, s1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s7
+; GFX10-NEXT: s_lshr_b32 s7, s6, 16
+; GFX10-NEXT: s_lshr_b32 s8, s5, 16
+; GFX10-NEXT: s_sub_i32 s5, s6, s5
+; GFX10-NEXT: s_sub_i32 s6, s7, s8
+; GFX10-NEXT: s_cmp_lt_i32 s2, s4
+; GFX10-NEXT: s_cselect_b32 s2, s2, s4
+; GFX10-NEXT: s_cmp_lt_i32 s3, s1
+; GFX10-NEXT: s_mov_b32 s4, 0xffff8000
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s4, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s2, s1
+; GFX10-NEXT: s_lshr_b32 s2, s3, 16
+; GFX10-NEXT: s_lshr_b32 s4, s1, 16
+; GFX10-NEXT: s_sub_i32 s1, s3, s1
+; GFX10-NEXT: s_sub_i32 s2, s2, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s2
+; GFX10-NEXT: v_pk_max_i16 v0, s1, v0
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s5, s6
+; GFX10-NEXT: v_pk_min_i16 v0, v0, s1
+; GFX10-NEXT: v_pk_add_u16 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @saddsat_v2i16_vs(<2 x i16> %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: saddsat_v2i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_mov_b32 s3, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v3, 0, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s3, v3
+; GFX6-NEXT: s_brev_b32 s2, -2
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v0
+; GFX6-NEXT: v_max_i32_e32 v3, s0, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s2, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_min_i32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_i32_e32 v3, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, 0, v1
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s3, v3
+; GFX6-NEXT: v_max_i32_e32 v3, s0, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s2, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v3, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_v2i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s3, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v3, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v3, s3, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: s_movk_i32 s2, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v2, 0, v0
+; GFX8-NEXT: v_min_i16_e32 v4, 0, v1
+; GFX8-NEXT: v_sub_u16_e32 v2, s2, v2
+; GFX8-NEXT: v_max_i16_e32 v3, s0, v3
+; GFX8-NEXT: v_min_i16_e32 v2, v3, v2
+; GFX8-NEXT: v_max_i16_e32 v3, 0, v1
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: v_sub_u16_e32 v4, s3, v4
+; GFX8-NEXT: v_sub_u16_e32 v3, s2, v3
+; GFX8-NEXT: v_max_i16_e32 v4, s1, v4
+; GFX8-NEXT: v_min_i16_e32 v3, v4, v3
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v2
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_v2i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s2, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, 0, 0
+; GFX9-NEXT: s_movk_i32 s1, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX9-NEXT: v_pk_min_i16 v2, v0, s3
+; GFX9-NEXT: v_pk_sub_i16 v2, s2, v2
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s1
+; GFX9-NEXT: v_pk_max_i16 v1, v0, s3
+; GFX9-NEXT: v_pk_sub_i16 v1, s1, v1
+; GFX9-NEXT: v_pk_max_i16 v2, v2, s0
+; GFX9-NEXT: v_pk_min_i16 v1, v2, v1
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_v2i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, 0, 0
+; GFX10-NEXT: s_mov_b32 s2, 0xffff8000
+; GFX10-NEXT: v_pk_min_i16 v1, v0, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX10-NEXT: v_pk_max_i16 v2, v0, s1
+; GFX10-NEXT: s_movk_i32 s3, 0x7fff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v1, s2, v1
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s3, s3
+; GFX10-NEXT: v_pk_sub_i16 v2, s1, v2
+; GFX10-NEXT: v_pk_max_i16 v1, v1, s0
+; GFX10-NEXT: v_pk_min_i16 v1, v1, v2
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
+
+; FIXME: v3i16 insert/extract
+; define <3 x i16> @v_saddsat_v3i16(<3 x i16> %lhs, <3 x i16> %rhs) {
+; %result = call <3 x i16> @llvm.sadd.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+; define amdgpu_ps <3 x i16> @s_saddsat_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs) {
+; %result = call <3 x i16> @llvm.sadd.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+define <2 x float> @v_saddsat_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; GFX6-LABEL: v_saddsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v8, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s4, v8
+; GFX6-NEXT: v_max_i32_e32 v4, v10, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX6-NEXT: v_min_i32_e32 v8, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_bfrev_b32_e32 v9, -2
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v9, v5
+; GFX6-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_mov_b32_e32 v11, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v6, 0, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_max_i32_e32 v5, 0, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v7
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v11, v6
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v9, v5
+; GFX6-NEXT: v_max_i32_e32 v4, v6, v4
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v7, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v7, s5, v7
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v6, 0, v0
+; GFX8-NEXT: v_min_i16_e32 v8, 0, v4
+; GFX8-NEXT: v_sub_u16_e32 v6, s4, v6
+; GFX8-NEXT: v_max_i16_e32 v7, v7, v2
+; GFX8-NEXT: v_min_i16_e32 v6, v7, v6
+; GFX8-NEXT: v_max_i16_e32 v7, 0, v4
+; GFX8-NEXT: v_sub_u16_e32 v8, s5, v8
+; GFX8-NEXT: v_max_i16_sdwa v2, v8, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v8, 0, v1
+; GFX8-NEXT: v_sub_u16_e32 v7, s4, v7
+; GFX8-NEXT: v_min_i16_e32 v2, v2, v7
+; GFX8-NEXT: v_max_i16_e32 v7, 0, v1
+; GFX8-NEXT: v_sub_u16_e32 v8, s5, v8
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX8-NEXT: v_min_i16_e32 v9, 0, v5
+; GFX8-NEXT: v_sub_u16_e32 v7, s4, v7
+; GFX8-NEXT: v_max_i16_e32 v8, v8, v3
+; GFX8-NEXT: v_min_i16_e32 v7, v8, v7
+; GFX8-NEXT: v_max_i16_e32 v8, 0, v5
+; GFX8-NEXT: v_sub_u16_e32 v9, s5, v9
+; GFX8-NEXT: v_sub_u16_e32 v8, s4, v8
+; GFX8-NEXT: v_max_i16_sdwa v3, v9, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v8
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v6
+; GFX8-NEXT: v_add_u16_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v7
+; GFX8-NEXT: v_add_u16_sdwa v2, v5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, 0, 0
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v5, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v5, s5, v5
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v4, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, s4, v4
+; GFX9-NEXT: v_pk_max_i16 v2, v5, v2
+; GFX9-NEXT: v_pk_min_i16 v2, v2, v4
+; GFX9-NEXT: v_pk_min_i16 v4, v1, s6
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_i16 v2, v1, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, s5, v4
+; GFX9-NEXT: v_pk_sub_i16 v2, s4, v2
+; GFX9-NEXT: v_pk_max_i16 v3, v4, v3
+; GFX9-NEXT: v_pk_min_i16 v2, v3, v2
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, 0, 0
+; GFX10-NEXT: s_mov_b32 s6, 0xffff8000
+; GFX10-NEXT: v_pk_min_i16 v4, v0, s5
+; GFX10-NEXT: v_pk_min_i16 v5, v1, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX10-NEXT: v_pk_max_i16 v6, v0, s5
+; GFX10-NEXT: v_pk_max_i16 v7, v1, s5
+; GFX10-NEXT: v_pk_sub_i16 v4, s6, v4
+; GFX10-NEXT: v_pk_sub_i16 v5, s6, v5
+; GFX10-NEXT: s_movk_i32 s4, 0x7fff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX10-NEXT: v_pk_max_i16 v11, v4, v2
+; GFX10-NEXT: v_pk_sub_i16 v6, s4, v6
+; GFX10-NEXT: v_pk_sub_i16 v4, s4, v7
+; GFX10-NEXT: v_pk_max_i16 v3, v5, v3
+; GFX10-NEXT: v_pk_min_i16 v2, v11, v6
+; GFX10-NEXT: v_pk_min_i16 v3, v3, v4
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x i32> @s_saddsat_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s8, -2
+; GFX6-NEXT: s_cselect_b32 s10, s0, 0
+; GFX6-NEXT: s_sub_i32 s10, s8, s10
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s9, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s11, s0, 0
+; GFX6-NEXT: s_sub_i32 s11, s9, s11
+; GFX6-NEXT: s_cmp_gt_i32 s11, s4
+; GFX6-NEXT: s_cselect_b32 s4, s11, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s10
+; GFX6-NEXT: s_cselect_b32 s4, s4, s10
+; GFX6-NEXT: s_add_i32 s0, s0, s4
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s4, s5, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s5, s1, 0
+; GFX6-NEXT: s_sub_i32 s5, s8, s5
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s10, s1, 0
+; GFX6-NEXT: s_sub_i32 s10, s9, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s4
+; GFX6-NEXT: s_cselect_b32 s4, s10, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_add_i32 s1, s1, s4
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s4, s6, 16
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s5, s2, 0
+; GFX6-NEXT: s_sub_i32 s5, s8, s5
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s6, s2, 0
+; GFX6-NEXT: s_sub_i32 s6, s9, s6
+; GFX6-NEXT: s_cmp_gt_i32 s6, s4
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_add_i32 s2, s2, s4
+; GFX6-NEXT: s_ashr_i32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s7, 16
+; GFX6-NEXT: s_cmp_gt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s5, s3, 0
+; GFX6-NEXT: s_sub_i32 s5, s8, s5
+; GFX6-NEXT: s_cmp_lt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s6, s3, 0
+; GFX6-NEXT: s_sub_i32 s6, s9, s6
+; GFX6-NEXT: s_cmp_gt_i32 s6, s4
+; GFX6-NEXT: s_cselect_b32 s4, s6, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_ashr_i32 s3, s3, 16
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_and_b32 s2, s3, s4
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s6, s2, 16
+; GFX8-NEXT: s_lshr_b32 s7, s3, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 16
+; GFX8-NEXT: s_lshr_b32 s5, s1, 16
+; GFX8-NEXT: s_sext_i32_i16 s10, s0
+; GFX8-NEXT: s_sext_i32_i16 s11, 0
+; GFX8-NEXT: s_cmp_gt_i32 s10, s11
+; GFX8-NEXT: s_movk_i32 s8, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s12, s10, s11
+; GFX8-NEXT: s_sub_i32 s12, s8, s12
+; GFX8-NEXT: s_cmp_lt_i32 s10, s11
+; GFX8-NEXT: s_mov_b32 s9, 0x8000
+; GFX8-NEXT: s_cselect_b32 s10, s10, s11
+; GFX8-NEXT: s_sub_i32 s10, s9, s10
+; GFX8-NEXT: s_sext_i32_i16 s10, s10
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_gt_i32 s10, s2
+; GFX8-NEXT: s_cselect_b32 s2, s10, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s10, s12
+; GFX8-NEXT: s_cmp_lt_i32 s2, s10
+; GFX8-NEXT: s_cselect_b32 s2, s2, s10
+; GFX8-NEXT: s_add_i32 s0, s0, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s4
+; GFX8-NEXT: s_cmp_gt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s10, s2, s11
+; GFX8-NEXT: s_sub_i32 s10, s8, s10
+; GFX8-NEXT: s_cmp_lt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s2, s2, s11
+; GFX8-NEXT: s_sub_i32 s2, s9, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_gt_i32 s2, s6
+; GFX8-NEXT: s_cselect_b32 s2, s2, s6
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s6, s10
+; GFX8-NEXT: s_cmp_lt_i32 s2, s6
+; GFX8-NEXT: s_cselect_b32 s2, s2, s6
+; GFX8-NEXT: s_add_i32 s4, s4, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s1
+; GFX8-NEXT: s_cmp_gt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s6, s2, s11
+; GFX8-NEXT: s_sub_i32 s6, s8, s6
+; GFX8-NEXT: s_cmp_lt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s2, s2, s11
+; GFX8-NEXT: s_sub_i32 s2, s9, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s6
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_lt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s5
+; GFX8-NEXT: s_cmp_gt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s3, s2, s11
+; GFX8-NEXT: s_sub_i32 s3, s8, s3
+; GFX8-NEXT: s_cmp_lt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s2, s2, s11
+; GFX8-NEXT: s_sub_i32 s2, s9, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s6, s7
+; GFX8-NEXT: s_cmp_gt_i32 s2, s6
+; GFX8-NEXT: s_cselect_b32 s2, s2, s6
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_lt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_add_i32 s5, s5, s2
+; GFX8-NEXT: s_bfe_u32 s2, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s2
+; GFX8-NEXT: s_bfe_u32 s2, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, 0, 0
+; GFX9-NEXT: s_sext_i32_i16 s9, s6
+; GFX9-NEXT: s_sext_i32_i16 s7, s0
+; GFX9-NEXT: s_ashr_i32 s8, s0, 16
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_cmp_gt_i32 s7, s9
+; GFX9-NEXT: s_cselect_b32 s10, s7, s9
+; GFX9-NEXT: s_cmp_gt_i32 s8, s6
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s11, s8, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: s_lshr_b32 s12, s10, 16
+; GFX9-NEXT: s_lshr_b32 s11, s4, 16
+; GFX9-NEXT: s_sub_i32 s10, s4, s10
+; GFX9-NEXT: s_sub_i32 s12, s11, s12
+; GFX9-NEXT: s_cmp_lt_i32 s7, s9
+; GFX9-NEXT: s_cselect_b32 s7, s7, s9
+; GFX9-NEXT: s_cmp_lt_i32 s8, s6
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s8, s8, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX9-NEXT: s_lshr_b32 s12, s7, 16
+; GFX9-NEXT: s_lshr_b32 s8, s5, 16
+; GFX9-NEXT: s_sub_i32 s7, s5, s7
+; GFX9-NEXT: s_sub_i32 s12, s8, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s12
+; GFX9-NEXT: s_sext_i32_i16 s12, s7
+; GFX9-NEXT: s_sext_i32_i16 s13, s2
+; GFX9-NEXT: s_ashr_i32 s7, s7, 16
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_cmp_gt_i32 s12, s13
+; GFX9-NEXT: s_cselect_b32 s12, s12, s13
+; GFX9-NEXT: s_cmp_gt_i32 s7, s2
+; GFX9-NEXT: s_cselect_b32 s2, s7, s2
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s12, s2
+; GFX9-NEXT: s_sext_i32_i16 s7, s2
+; GFX9-NEXT: s_sext_i32_i16 s12, s10
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_ashr_i32 s10, s10, 16
+; GFX9-NEXT: s_cmp_lt_i32 s7, s12
+; GFX9-NEXT: s_cselect_b32 s7, s7, s12
+; GFX9-NEXT: s_cmp_lt_i32 s2, s10
+; GFX9-NEXT: s_cselect_b32 s2, s2, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s7, s2
+; GFX9-NEXT: s_lshr_b32 s7, s0, 16
+; GFX9-NEXT: s_lshr_b32 s10, s2, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s2
+; GFX9-NEXT: s_add_i32 s7, s7, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s7
+; GFX9-NEXT: s_sext_i32_i16 s2, s1
+; GFX9-NEXT: s_ashr_i32 s7, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s2, s9
+; GFX9-NEXT: s_cselect_b32 s10, s2, s9
+; GFX9-NEXT: s_cmp_gt_i32 s7, s6
+; GFX9-NEXT: s_cselect_b32 s12, s7, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX9-NEXT: s_lshr_b32 s12, s10, 16
+; GFX9-NEXT: s_sub_i32 s4, s4, s10
+; GFX9-NEXT: s_sub_i32 s10, s11, s12
+; GFX9-NEXT: s_cmp_lt_i32 s2, s9
+; GFX9-NEXT: s_cselect_b32 s2, s2, s9
+; GFX9-NEXT: s_cmp_lt_i32 s7, s6
+; GFX9-NEXT: s_cselect_b32 s6, s7, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s6
+; GFX9-NEXT: s_lshr_b32 s6, s2, 16
+; GFX9-NEXT: s_sub_i32 s2, s5, s2
+; GFX9-NEXT: s_sub_i32 s5, s8, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s2
+; GFX9-NEXT: s_sext_i32_i16 s6, s3
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_cmp_gt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s5, s2
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s10
+; GFX9-NEXT: s_sext_i32_i16 s3, s2
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_lt_i32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s3, s3, s5
+; GFX9-NEXT: s_cmp_lt_i32 s2, s4
+; GFX9-NEXT: s_cselect_b32 s2, s2, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, 0, 0
+; GFX10-NEXT: s_sext_i32_i16 s5, s0
+; GFX10-NEXT: s_sext_i32_i16 s7, s4
+; GFX10-NEXT: s_ashr_i32 s6, s0, 16
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_gt_i32 s5, s7
+; GFX10-NEXT: s_movk_i32 s9, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s8, s5, s7
+; GFX10-NEXT: s_cmp_gt_i32 s6, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s9
+; GFX10-NEXT: s_cselect_b32 s10, s6, s4
+; GFX10-NEXT: s_mov_b32 s12, 0xffff8000
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, s8, s10
+; GFX10-NEXT: s_lshr_b32 s10, s9, 16
+; GFX10-NEXT: s_lshr_b32 s11, s8, 16
+; GFX10-NEXT: s_sub_i32 s8, s9, s8
+; GFX10-NEXT: s_sub_i32 s11, s10, s11
+; GFX10-NEXT: s_cmp_lt_i32 s5, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s12, s12, s12
+; GFX10-NEXT: s_cselect_b32 s5, s5, s7
+; GFX10-NEXT: s_cmp_lt_i32 s6, s4
+; GFX10-NEXT: s_sext_i32_i16 s14, s2
+; GFX10-NEXT: s_cselect_b32 s6, s6, s4
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX10-NEXT: s_lshr_b32 s6, s12, 16
+; GFX10-NEXT: s_lshr_b32 s13, s5, 16
+; GFX10-NEXT: s_sub_i32 s5, s12, s5
+; GFX10-NEXT: s_sub_i32 s13, s6, s13
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s13
+; GFX10-NEXT: s_sext_i32_i16 s13, s5
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_cmp_gt_i32 s13, s14
+; GFX10-NEXT: s_cselect_b32 s13, s13, s14
+; GFX10-NEXT: s_cmp_gt_i32 s5, s2
+; GFX10-NEXT: s_cselect_b32 s2, s5, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s8, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s13, s2
+; GFX10-NEXT: s_sext_i32_i16 s11, s5
+; GFX10-NEXT: s_sext_i32_i16 s8, s2
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_cmp_lt_i32 s8, s11
+; GFX10-NEXT: s_cselect_b32 s8, s8, s11
+; GFX10-NEXT: s_cmp_lt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_lshr_b32 s5, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s8, s2
+; GFX10-NEXT: s_lshr_b32 s8, s2, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s2
+; GFX10-NEXT: s_sext_i32_i16 s2, s1
+; GFX10-NEXT: s_add_i32 s5, s5, s8
+; GFX10-NEXT: s_ashr_i32 s8, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s2, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX10-NEXT: s_cselect_b32 s11, s2, s7
+; GFX10-NEXT: s_cmp_gt_i32 s8, s4
+; GFX10-NEXT: s_cselect_b32 s13, s8, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s11, s11, s13
+; GFX10-NEXT: s_lshr_b32 s13, s11, 16
+; GFX10-NEXT: s_sub_i32 s9, s9, s11
+; GFX10-NEXT: s_sub_i32 s10, s10, s13
+; GFX10-NEXT: s_cmp_lt_i32 s2, s7
+; GFX10-NEXT: s_cselect_b32 s2, s2, s7
+; GFX10-NEXT: s_cmp_lt_i32 s8, s4
+; GFX10-NEXT: s_cselect_b32 s4, s8, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX10-NEXT: s_lshr_b32 s4, s2, 16
+; GFX10-NEXT: s_sub_i32 s2, s12, s2
+; GFX10-NEXT: s_sub_i32 s4, s6, s4
+; GFX10-NEXT: s_sext_i32_i16 s6, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_sext_i32_i16 s4, s2
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s4, s6
+; GFX10-NEXT: s_cselect_b32 s4, s4, s6
+; GFX10-NEXT: s_cmp_gt_i32 s2, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s9, s10
+; GFX10-NEXT: s_cselect_b32 s2, s2, s3
+; GFX10-NEXT: s_sext_i32_i16 s3, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s4, s2
+; GFX10-NEXT: s_ashr_i32 s4, s6, 16
+; GFX10-NEXT: s_sext_i32_i16 s6, s2
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_cmp_lt_i32 s6, s3
+; GFX10-NEXT: s_cselect_b32 s3, s6, s3
+; GFX10-NEXT: s_cmp_lt_i32 s2, s4
+; GFX10-NEXT: s_cselect_b32 s2, s2, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX10-NEXT: s_lshr_b32 s3, s1, 16
+; GFX10-NEXT: s_lshr_b32 s4, s2, 16
+; GFX10-NEXT: s_add_i32 s1, s1, s2
+; GFX10-NEXT: s_add_i32 s3, s3, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x i32>
+ ret <2 x i32> %cast
+}
+
+; FIXME
+; define <5 x i16> @v_saddsat_v5i16(<5 x i16> %lhs, <5 x i16> %rhs) {
+; %result = call <5 x i16> @llvm.sadd.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+; define amdgpu_ps <5 x i16> @s_saddsat_v5i16(<5 x i16> inreg %lhs, <5 x i16> inreg %rhs) {
+; %result = call <5 x i16> @llvm.sadd.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+define <3 x float> @v_saddsat_v6i16(<6 x i16> %lhs, <6 x i16> %rhs) {
+; GFX6-LABEL: v_saddsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v14, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_sub_i32_e32 v14, vcc, s5, v14
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v12, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v12, vcc, s4, v12
+; GFX6-NEXT: v_max_i32_e32 v6, v14, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v12
+; GFX6-NEXT: v_min_i32_e32 v12, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX6-NEXT: v_max_i32_e32 v7, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v12, vcc, s5, v12
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, s4, v7
+; GFX6-NEXT: v_max_i32_e32 v6, v12, v6
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v8
+; GFX6-NEXT: v_min_i32_e32 v8, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_bfrev_b32_e32 v13, -2
+; GFX6-NEXT: v_max_i32_e32 v7, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v13, v7
+; GFX6-NEXT: v_max_i32_e32 v6, v8, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v7
+; GFX6-NEXT: v_mov_b32_e32 v15, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v8, 0, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX6-NEXT: v_max_i32_e32 v7, 0, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v15, v8
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v13, v7
+; GFX6-NEXT: v_max_i32_e32 v6, v8, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v7
+; GFX6-NEXT: v_min_i32_e32 v8, 0, v4
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v6
+; GFX6-NEXT: v_max_i32_e32 v7, 0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v10
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v15, v8
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v13, v7
+; GFX6-NEXT: v_max_i32_e32 v6, v8, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v7
+; GFX6-NEXT: v_min_i32_e32 v8, 0, v5
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v6
+; GFX6-NEXT: v_max_i32_e32 v7, 0, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v11
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v15, v8
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v13, v7
+; GFX6-NEXT: v_max_i32_e32 v6, v8, v6
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v7
+; GFX6-NEXT: v_add_i32_e32 v5, vcc, v5, v6
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v11, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v11, s5, v11
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v9, 0, v0
+; GFX8-NEXT: v_min_i16_e32 v13, 0, v6
+; GFX8-NEXT: v_sub_u16_e32 v9, s4, v9
+; GFX8-NEXT: v_max_i16_e32 v11, v11, v3
+; GFX8-NEXT: v_min_i16_e32 v9, v11, v9
+; GFX8-NEXT: v_max_i16_e32 v11, 0, v6
+; GFX8-NEXT: v_sub_u16_e32 v13, s5, v13
+; GFX8-NEXT: v_max_i16_sdwa v3, v13, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v13, 0, v1
+; GFX8-NEXT: v_sub_u16_e32 v11, s4, v11
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v11
+; GFX8-NEXT: v_max_i16_e32 v11, 0, v1
+; GFX8-NEXT: v_sub_u16_e32 v13, s5, v13
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX8-NEXT: v_min_i16_e32 v14, 0, v7
+; GFX8-NEXT: v_sub_u16_e32 v11, s4, v11
+; GFX8-NEXT: v_max_i16_e32 v13, v13, v4
+; GFX8-NEXT: v_min_i16_e32 v11, v13, v11
+; GFX8-NEXT: v_max_i16_e32 v13, 0, v7
+; GFX8-NEXT: v_sub_u16_e32 v14, s5, v14
+; GFX8-NEXT: v_max_i16_sdwa v4, v14, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v13, s4, v13
+; GFX8-NEXT: v_mov_b32_e32 v12, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v14, 0, v2
+; GFX8-NEXT: v_sub_u16_e32 v14, v12, v14
+; GFX8-NEXT: v_min_i16_e32 v4, v4, v13
+; GFX8-NEXT: v_mov_b32_e32 v10, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v13, 0, v2
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX8-NEXT: v_sub_u16_e32 v13, v10, v13
+; GFX8-NEXT: v_max_i16_e32 v14, v14, v5
+; GFX8-NEXT: v_min_i16_e32 v13, v14, v13
+; GFX8-NEXT: v_max_i16_e32 v14, 0, v8
+; GFX8-NEXT: v_sub_u16_e32 v10, v10, v14
+; GFX8-NEXT: v_min_i16_e32 v14, 0, v8
+; GFX8-NEXT: v_sub_u16_e32 v12, v12, v14
+; GFX8-NEXT: v_max_i16_sdwa v5, v12, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v9
+; GFX8-NEXT: v_add_u16_sdwa v3, v6, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX8-NEXT: v_min_i16_e32 v5, v5, v10
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v11
+; GFX8-NEXT: v_add_u16_sdwa v3, v7, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_add_u16_e32 v2, v2, v13
+; GFX8-NEXT: v_add_u16_sdwa v3, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, 0, 0
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v7, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v7, s5, v7
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v6, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v6, s4, v6
+; GFX9-NEXT: v_pk_max_i16 v3, v7, v3
+; GFX9-NEXT: v_pk_min_i16 v3, v3, v6
+; GFX9-NEXT: v_pk_min_i16 v6, v1, s6
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v3
+; GFX9-NEXT: v_pk_max_i16 v3, v1, s6
+; GFX9-NEXT: v_pk_sub_i16 v6, s5, v6
+; GFX9-NEXT: v_pk_sub_i16 v3, s4, v3
+; GFX9-NEXT: v_pk_max_i16 v4, v6, v4
+; GFX9-NEXT: v_pk_min_i16 v3, v4, v3
+; GFX9-NEXT: v_pk_min_i16 v4, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, s5, v4
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX9-NEXT: v_pk_max_i16 v3, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v3, s4, v3
+; GFX9-NEXT: v_pk_max_i16 v4, v4, v5
+; GFX9-NEXT: v_pk_min_i16 v3, v4, v3
+; GFX9-NEXT: v_pk_add_u16 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, 0, 0
+; GFX10-NEXT: s_mov_b32 s6, 0xffff8000
+; GFX10-NEXT: v_pk_min_i16 v7, v0, s5
+; GFX10-NEXT: v_pk_min_i16 v8, v1, s5
+; GFX10-NEXT: v_pk_min_i16 v9, v2, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX10-NEXT: v_pk_max_i16 v6, v0, s5
+; GFX10-NEXT: v_pk_sub_i16 v14, s6, v7
+; GFX10-NEXT: v_pk_sub_i16 v15, s6, v8
+; GFX10-NEXT: v_pk_sub_i16 v19, s6, v9
+; GFX10-NEXT: v_pk_max_i16 v10, v1, s5
+; GFX10-NEXT: v_pk_max_i16 v11, v2, s5
+; GFX10-NEXT: s_movk_i32 s4, 0x7fff
+; GFX10-NEXT: v_pk_max_i16 v3, v14, v3
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX10-NEXT: v_pk_max_i16 v4, v15, v4
+; GFX10-NEXT: v_pk_sub_i16 v6, s4, v6
+; GFX10-NEXT: v_pk_sub_i16 v7, s4, v10
+; GFX10-NEXT: v_pk_sub_i16 v8, s4, v11
+; GFX10-NEXT: v_pk_max_i16 v5, v19, v5
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_min_i16 v3, v3, v6
+; GFX10-NEXT: v_pk_min_i16 v4, v4, v7
+; GFX10-NEXT: v_pk_min_i16 v5, v5, v8
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v3
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v4
+; GFX10-NEXT: v_pk_add_u16 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <6 x i16> @llvm.sadd.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x float>
+ ret <3 x float> %cast
+}
+
+define amdgpu_ps <3 x i32> @s_saddsat_v6i16(<6 x i16> inreg %lhs, <6 x i16> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s12, -2
+; GFX6-NEXT: s_cselect_b32 s14, s0, 0
+; GFX6-NEXT: s_sub_i32 s14, s12, s14
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s13, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s15, s0, 0
+; GFX6-NEXT: s_sub_i32 s15, s13, s15
+; GFX6-NEXT: s_cmp_gt_i32 s15, s6
+; GFX6-NEXT: s_cselect_b32 s6, s15, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s14
+; GFX6-NEXT: s_cselect_b32 s6, s6, s14
+; GFX6-NEXT: s_add_i32 s0, s0, s6
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s6, s7, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s7, s1, 0
+; GFX6-NEXT: s_sub_i32 s7, s12, s7
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s14, s1, 0
+; GFX6-NEXT: s_sub_i32 s14, s13, s14
+; GFX6-NEXT: s_cmp_gt_i32 s14, s6
+; GFX6-NEXT: s_cselect_b32 s6, s14, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s7
+; GFX6-NEXT: s_cselect_b32 s6, s6, s7
+; GFX6-NEXT: s_add_i32 s1, s1, s6
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s6, s8, 16
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s7, s2, 0
+; GFX6-NEXT: s_sub_i32 s7, s12, s7
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s8, s2, 0
+; GFX6-NEXT: s_sub_i32 s8, s13, s8
+; GFX6-NEXT: s_cmp_gt_i32 s8, s6
+; GFX6-NEXT: s_cselect_b32 s6, s8, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s7
+; GFX6-NEXT: s_cselect_b32 s6, s6, s7
+; GFX6-NEXT: s_add_i32 s2, s2, s6
+; GFX6-NEXT: s_ashr_i32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s6, s9, 16
+; GFX6-NEXT: s_cmp_gt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s7, s3, 0
+; GFX6-NEXT: s_sub_i32 s7, s12, s7
+; GFX6-NEXT: s_cmp_lt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s8, s3, 0
+; GFX6-NEXT: s_sub_i32 s8, s13, s8
+; GFX6-NEXT: s_cmp_gt_i32 s8, s6
+; GFX6-NEXT: s_cselect_b32 s6, s8, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s7
+; GFX6-NEXT: s_cselect_b32 s6, s6, s7
+; GFX6-NEXT: s_add_i32 s3, s3, s6
+; GFX6-NEXT: s_ashr_i32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s6, s10, 16
+; GFX6-NEXT: s_cmp_gt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s7, s4, 0
+; GFX6-NEXT: s_sub_i32 s7, s12, s7
+; GFX6-NEXT: s_cmp_lt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s8, s4, 0
+; GFX6-NEXT: s_sub_i32 s8, s13, s8
+; GFX6-NEXT: s_cmp_gt_i32 s8, s6
+; GFX6-NEXT: s_cselect_b32 s6, s8, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s7
+; GFX6-NEXT: s_cselect_b32 s6, s6, s7
+; GFX6-NEXT: s_add_i32 s4, s4, s6
+; GFX6-NEXT: s_ashr_i32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s6, s11, 16
+; GFX6-NEXT: s_cmp_gt_i32 s5, 0
+; GFX6-NEXT: s_cselect_b32 s7, s5, 0
+; GFX6-NEXT: s_sub_i32 s7, s12, s7
+; GFX6-NEXT: s_cmp_lt_i32 s5, 0
+; GFX6-NEXT: s_cselect_b32 s8, s5, 0
+; GFX6-NEXT: s_sub_i32 s8, s13, s8
+; GFX6-NEXT: s_cmp_gt_i32 s8, s6
+; GFX6-NEXT: s_cselect_b32 s6, s8, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s7
+; GFX6-NEXT: s_cselect_b32 s6, s6, s7
+; GFX6-NEXT: s_add_i32 s5, s5, s6
+; GFX6-NEXT: s_mov_b32 s6, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s6
+; GFX6-NEXT: s_and_b32 s0, s0, s6
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s6
+; GFX6-NEXT: s_and_b32 s2, s3, s6
+; GFX6-NEXT: s_ashr_i32 s5, s5, 16
+; GFX6-NEXT: s_and_b32 s3, s5, s6
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s6
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s9, s3, 16
+; GFX8-NEXT: s_lshr_b32 s10, s4, 16
+; GFX8-NEXT: s_lshr_b32 s11, s5, 16
+; GFX8-NEXT: s_lshr_b32 s6, s0, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 16
+; GFX8-NEXT: s_lshr_b32 s8, s2, 16
+; GFX8-NEXT: s_sext_i32_i16 s14, s0
+; GFX8-NEXT: s_sext_i32_i16 s15, 0
+; GFX8-NEXT: s_cmp_gt_i32 s14, s15
+; GFX8-NEXT: s_movk_i32 s12, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s16, s14, s15
+; GFX8-NEXT: s_sub_i32 s16, s12, s16
+; GFX8-NEXT: s_cmp_lt_i32 s14, s15
+; GFX8-NEXT: s_mov_b32 s13, 0x8000
+; GFX8-NEXT: s_cselect_b32 s14, s14, s15
+; GFX8-NEXT: s_sub_i32 s14, s13, s14
+; GFX8-NEXT: s_sext_i32_i16 s14, s14
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s14, s3
+; GFX8-NEXT: s_cselect_b32 s3, s14, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s14, s16
+; GFX8-NEXT: s_cmp_lt_i32 s3, s14
+; GFX8-NEXT: s_cselect_b32 s3, s3, s14
+; GFX8-NEXT: s_add_i32 s0, s0, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s6
+; GFX8-NEXT: s_cmp_gt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s14, s3, s15
+; GFX8-NEXT: s_sub_i32 s14, s12, s14
+; GFX8-NEXT: s_cmp_lt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s3, s3, s15
+; GFX8-NEXT: s_sub_i32 s3, s13, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s9, s9
+; GFX8-NEXT: s_cmp_gt_i32 s3, s9
+; GFX8-NEXT: s_cselect_b32 s3, s3, s9
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s9, s14
+; GFX8-NEXT: s_cmp_lt_i32 s3, s9
+; GFX8-NEXT: s_cselect_b32 s3, s3, s9
+; GFX8-NEXT: s_add_i32 s6, s6, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s1
+; GFX8-NEXT: s_cmp_gt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s9, s3, s15
+; GFX8-NEXT: s_sub_i32 s9, s12, s9
+; GFX8-NEXT: s_cmp_lt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s3, s3, s15
+; GFX8-NEXT: s_sub_i32 s3, s13, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_gt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s9
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s1, s1, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s7
+; GFX8-NEXT: s_cmp_gt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s4, s3, s15
+; GFX8-NEXT: s_sub_i32 s4, s12, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s3, s3, s15
+; GFX8-NEXT: s_sub_i32 s3, s13, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s9, s10
+; GFX8-NEXT: s_cmp_gt_i32 s3, s9
+; GFX8-NEXT: s_cselect_b32 s3, s3, s9
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s7, s7, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s2
+; GFX8-NEXT: s_cmp_gt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s4, s3, s15
+; GFX8-NEXT: s_sub_i32 s4, s12, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s3, s3, s15
+; GFX8-NEXT: s_sub_i32 s3, s13, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_gt_i32 s3, s5
+; GFX8-NEXT: s_cselect_b32 s3, s3, s5
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s2, s2, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s8
+; GFX8-NEXT: s_cmp_gt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s4, s3, s15
+; GFX8-NEXT: s_sub_i32 s4, s12, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s3, s3, s15
+; GFX8-NEXT: s_sub_i32 s3, s13, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s5, s11
+; GFX8-NEXT: s_cmp_gt_i32 s3, s5
+; GFX8-NEXT: s_cselect_b32 s3, s3, s5
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s8, s8, s3
+; GFX8-NEXT: s_bfe_u32 s3, s6, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s3
+; GFX8-NEXT: s_bfe_u32 s3, s7, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s3
+; GFX8-NEXT: s_bfe_u32 s3, s8, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, 0, 0
+; GFX9-NEXT: s_sext_i32_i16 s11, s8
+; GFX9-NEXT: s_sext_i32_i16 s9, s0
+; GFX9-NEXT: s_ashr_i32 s10, s0, 16
+; GFX9-NEXT: s_ashr_i32 s8, s8, 16
+; GFX9-NEXT: s_cmp_gt_i32 s9, s11
+; GFX9-NEXT: s_cselect_b32 s12, s9, s11
+; GFX9-NEXT: s_cmp_gt_i32 s10, s8
+; GFX9-NEXT: s_movk_i32 s6, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s13, s10, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX9-NEXT: s_lshr_b32 s14, s12, 16
+; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_sub_i32 s12, s6, s12
+; GFX9-NEXT: s_sub_i32 s14, s13, s14
+; GFX9-NEXT: s_cmp_lt_i32 s9, s11
+; GFX9-NEXT: s_cselect_b32 s9, s9, s11
+; GFX9-NEXT: s_cmp_lt_i32 s10, s8
+; GFX9-NEXT: s_mov_b32 s7, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s10, s10, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s7
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX9-NEXT: s_lshr_b32 s14, s9, 16
+; GFX9-NEXT: s_lshr_b32 s10, s7, 16
+; GFX9-NEXT: s_sub_i32 s9, s7, s9
+; GFX9-NEXT: s_sub_i32 s14, s10, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s14
+; GFX9-NEXT: s_sext_i32_i16 s14, s9
+; GFX9-NEXT: s_sext_i32_i16 s15, s3
+; GFX9-NEXT: s_ashr_i32 s9, s9, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s14, s15
+; GFX9-NEXT: s_cselect_b32 s14, s14, s15
+; GFX9-NEXT: s_cmp_gt_i32 s9, s3
+; GFX9-NEXT: s_cselect_b32 s3, s9, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s14, s3
+; GFX9-NEXT: s_sext_i32_i16 s9, s3
+; GFX9-NEXT: s_sext_i32_i16 s14, s12
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s12, s12, 16
+; GFX9-NEXT: s_cmp_lt_i32 s9, s14
+; GFX9-NEXT: s_cselect_b32 s9, s9, s14
+; GFX9-NEXT: s_cmp_lt_i32 s3, s12
+; GFX9-NEXT: s_cselect_b32 s3, s3, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s9, s3
+; GFX9-NEXT: s_lshr_b32 s9, s0, 16
+; GFX9-NEXT: s_lshr_b32 s12, s3, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s3
+; GFX9-NEXT: s_add_i32 s9, s9, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s9
+; GFX9-NEXT: s_sext_i32_i16 s3, s1
+; GFX9-NEXT: s_ashr_i32 s9, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s12, s3, s11
+; GFX9-NEXT: s_cmp_gt_i32 s9, s8
+; GFX9-NEXT: s_cselect_b32 s14, s9, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX9-NEXT: s_lshr_b32 s14, s12, 16
+; GFX9-NEXT: s_sub_i32 s12, s6, s12
+; GFX9-NEXT: s_sub_i32 s14, s13, s14
+; GFX9-NEXT: s_cmp_lt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s3, s3, s11
+; GFX9-NEXT: s_cmp_lt_i32 s9, s8
+; GFX9-NEXT: s_cselect_b32 s9, s9, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX9-NEXT: s_lshr_b32 s9, s3, 16
+; GFX9-NEXT: s_sub_i32 s3, s7, s3
+; GFX9-NEXT: s_sub_i32 s9, s10, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX9-NEXT: s_sext_i32_i16 s9, s3
+; GFX9-NEXT: s_sext_i32_i16 s14, s4
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_gt_i32 s9, s14
+; GFX9-NEXT: s_cselect_b32 s9, s9, s14
+; GFX9-NEXT: s_cmp_gt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s9, s3
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_sext_i32_i16 s9, s12
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s12, s12, 16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s9
+; GFX9-NEXT: s_cselect_b32 s4, s4, s9
+; GFX9-NEXT: s_cmp_lt_i32 s3, s12
+; GFX9-NEXT: s_cselect_b32 s3, s3, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX9-NEXT: s_lshr_b32 s4, s1, 16
+; GFX9-NEXT: s_lshr_b32 s9, s3, 16
+; GFX9-NEXT: s_add_i32 s1, s1, s3
+; GFX9-NEXT: s_add_i32 s4, s4, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX9-NEXT: s_sext_i32_i16 s3, s2
+; GFX9-NEXT: s_ashr_i32 s4, s2, 16
+; GFX9-NEXT: s_cmp_gt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s9, s3, s11
+; GFX9-NEXT: s_cmp_gt_i32 s4, s8
+; GFX9-NEXT: s_cselect_b32 s12, s4, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s12
+; GFX9-NEXT: s_lshr_b32 s12, s9, 16
+; GFX9-NEXT: s_sub_i32 s6, s6, s9
+; GFX9-NEXT: s_sub_i32 s9, s13, s12
+; GFX9-NEXT: s_cmp_lt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s3, s3, s11
+; GFX9-NEXT: s_cmp_lt_i32 s4, s8
+; GFX9-NEXT: s_cselect_b32 s4, s4, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_lshr_b32 s4, s3, 16
+; GFX9-NEXT: s_sub_i32 s3, s7, s3
+; GFX9-NEXT: s_sub_i32 s4, s10, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_sext_i32_i16 s7, s5
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s5, s5, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s7
+; GFX9-NEXT: s_cselect_b32 s4, s4, s7
+; GFX9-NEXT: s_cmp_gt_i32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s3, s3, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s9
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_sext_i32_i16 s5, s6
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, s6
+; GFX9-NEXT: s_cselect_b32 s3, s3, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: s_add_i32 s4, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, 0, 0
+; GFX10-NEXT: s_sext_i32_i16 s7, s0
+; GFX10-NEXT: s_sext_i32_i16 s9, s6
+; GFX10-NEXT: s_ashr_i32 s8, s0, 16
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_cmp_gt_i32 s7, s9
+; GFX10-NEXT: s_movk_i32 s11, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s10, s7, s9
+; GFX10-NEXT: s_cmp_gt_i32 s8, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s11, s11, s11
+; GFX10-NEXT: s_cselect_b32 s12, s8, s6
+; GFX10-NEXT: s_mov_b32 s14, 0xffff8000
+; GFX10-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX10-NEXT: s_lshr_b32 s12, s11, 16
+; GFX10-NEXT: s_lshr_b32 s13, s10, 16
+; GFX10-NEXT: s_sub_i32 s10, s11, s10
+; GFX10-NEXT: s_sub_i32 s13, s12, s13
+; GFX10-NEXT: s_cmp_lt_i32 s7, s9
+; GFX10-NEXT: s_pack_ll_b32_b16 s14, s14, s14
+; GFX10-NEXT: s_cselect_b32 s7, s7, s9
+; GFX10-NEXT: s_cmp_lt_i32 s8, s6
+; GFX10-NEXT: s_sext_i32_i16 s16, s3
+; GFX10-NEXT: s_cselect_b32 s8, s8, s6
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX10-NEXT: s_lshr_b32 s8, s14, 16
+; GFX10-NEXT: s_lshr_b32 s15, s7, 16
+; GFX10-NEXT: s_sub_i32 s7, s14, s7
+; GFX10-NEXT: s_sub_i32 s15, s8, s15
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s7, s15
+; GFX10-NEXT: s_sext_i32_i16 s15, s7
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_cmp_gt_i32 s15, s16
+; GFX10-NEXT: s_cselect_b32 s15, s15, s16
+; GFX10-NEXT: s_cmp_gt_i32 s7, s3
+; GFX10-NEXT: s_sext_i32_i16 s16, s4
+; GFX10-NEXT: s_cselect_b32 s3, s7, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s10, s13
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s15, s3
+; GFX10-NEXT: s_sext_i32_i16 s13, s7
+; GFX10-NEXT: s_sext_i32_i16 s10, s3
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_cmp_lt_i32 s10, s13
+; GFX10-NEXT: s_cselect_b32 s10, s10, s13
+; GFX10-NEXT: s_cmp_lt_i32 s3, s7
+; GFX10-NEXT: s_cselect_b32 s3, s3, s7
+; GFX10-NEXT: s_lshr_b32 s7, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s10, s3
+; GFX10-NEXT: s_lshr_b32 s10, s3, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s3
+; GFX10-NEXT: s_sext_i32_i16 s3, s1
+; GFX10-NEXT: s_add_i32 s7, s7, s10
+; GFX10-NEXT: s_ashr_i32 s10, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s3, s9
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s7
+; GFX10-NEXT: s_cselect_b32 s13, s3, s9
+; GFX10-NEXT: s_cmp_gt_i32 s10, s6
+; GFX10-NEXT: s_cselect_b32 s15, s10, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s13, s13, s15
+; GFX10-NEXT: s_lshr_b32 s15, s13, 16
+; GFX10-NEXT: s_sub_i32 s13, s11, s13
+; GFX10-NEXT: s_sub_i32 s15, s12, s15
+; GFX10-NEXT: s_cmp_lt_i32 s3, s9
+; GFX10-NEXT: s_cselect_b32 s3, s3, s9
+; GFX10-NEXT: s_cmp_lt_i32 s10, s6
+; GFX10-NEXT: s_cselect_b32 s10, s10, s6
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s10
+; GFX10-NEXT: s_lshr_b32 s10, s3, 16
+; GFX10-NEXT: s_sub_i32 s3, s14, s3
+; GFX10-NEXT: s_sub_i32 s10, s8, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s10
+; GFX10-NEXT: s_sext_i32_i16 s10, s3
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_cmp_gt_i32 s10, s16
+; GFX10-NEXT: s_cselect_b32 s10, s10, s16
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s13, s15
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s10, s3
+; GFX10-NEXT: s_sext_i32_i16 s13, s4
+; GFX10-NEXT: s_sext_i32_i16 s10, s3
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_lt_i32 s10, s13
+; GFX10-NEXT: s_cselect_b32 s10, s10, s13
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_sext_i32_i16 s4, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s10, s3
+; GFX10-NEXT: s_lshr_b32 s10, s1, 16
+; GFX10-NEXT: s_lshr_b32 s13, s3, 16
+; GFX10-NEXT: s_add_i32 s1, s1, s3
+; GFX10-NEXT: s_add_i32 s10, s10, s13
+; GFX10-NEXT: s_ashr_i32 s3, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s4, s9
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s10
+; GFX10-NEXT: s_cselect_b32 s13, s4, s9
+; GFX10-NEXT: s_cmp_gt_i32 s3, s6
+; GFX10-NEXT: s_cselect_b32 s15, s3, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s13, s13, s15
+; GFX10-NEXT: s_lshr_b32 s15, s13, 16
+; GFX10-NEXT: s_sub_i32 s11, s11, s13
+; GFX10-NEXT: s_sub_i32 s12, s12, s15
+; GFX10-NEXT: s_cmp_lt_i32 s4, s9
+; GFX10-NEXT: s_cselect_b32 s4, s4, s9
+; GFX10-NEXT: s_cmp_lt_i32 s3, s6
+; GFX10-NEXT: s_cselect_b32 s3, s3, s6
+; GFX10-NEXT: s_sext_i32_i16 s6, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX10-NEXT: s_lshr_b32 s4, s3, 16
+; GFX10-NEXT: s_sub_i32 s3, s14, s3
+; GFX10-NEXT: s_sub_i32 s4, s8, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX10-NEXT: s_ashr_i32 s4, s5, 16
+; GFX10-NEXT: s_sext_i32_i16 s5, s3
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_cmp_gt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s11, s12
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_sext_i32_i16 s4, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s5, s3
+; GFX10-NEXT: s_ashr_i32 s5, s6, 16
+; GFX10-NEXT: s_sext_i32_i16 s6, s3
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_cmp_lt_i32 s6, s4
+; GFX10-NEXT: s_cselect_b32 s4, s6, s4
+; GFX10-NEXT: s_cmp_lt_i32 s3, s5
+; GFX10-NEXT: s_cselect_b32 s3, s3, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX10-NEXT: s_lshr_b32 s4, s2, 16
+; GFX10-NEXT: s_lshr_b32 s5, s3, 16
+; GFX10-NEXT: s_add_i32 s2, s2, s3
+; GFX10-NEXT: s_add_i32 s4, s4, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <6 x i16> @llvm.sadd.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x i32>
+ ret <3 x i32> %cast
+}
+
+define <4 x float> @v_saddsat_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; GFX6-LABEL: v_saddsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v18, 0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX6-NEXT: v_sub_i32_e32 v18, vcc, s5, v18
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v16, 0, v0
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT: v_max_i32_e32 v8, v18, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v16
+; GFX6-NEXT: v_min_i32_e32 v16, 0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX6-NEXT: v_max_i32_e32 v9, 0, v1
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, s5, v16
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, s4, v9
+; GFX6-NEXT: v_max_i32_e32 v8, v16, v8
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v9
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v10
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: v_bfrev_b32_e32 v17, -2
+; GFX6-NEXT: v_max_i32_e32 v9, 0, v2
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v17, v9
+; GFX6-NEXT: v_max_i32_e32 v8, v10, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v9
+; GFX6-NEXT: v_mov_b32_e32 v19, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v8
+; GFX6-NEXT: v_max_i32_e32 v9, 0, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v11
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v19, v10
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v17, v9
+; GFX6-NEXT: v_max_i32_e32 v8, v10, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v9
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v4
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v8
+; GFX6-NEXT: v_max_i32_e32 v9, 0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v12
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v19, v10
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v17, v9
+; GFX6-NEXT: v_max_i32_e32 v8, v10, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v9
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v5
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v8
+; GFX6-NEXT: v_max_i32_e32 v9, 0, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v13
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v19, v10
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v17, v9
+; GFX6-NEXT: v_max_i32_e32 v8, v10, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v9
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v6
+; GFX6-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GFX6-NEXT: v_max_i32_e32 v9, 0, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v14
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v19, v10
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v17, v9
+; GFX6-NEXT: v_max_i32_e32 v8, v10, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v9
+; GFX6-NEXT: v_min_i32_e32 v10, 0, v7
+; GFX6-NEXT: v_add_i32_e32 v6, vcc, v6, v8
+; GFX6-NEXT: v_max_i32_e32 v9, 0, v7
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v15
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v19, v10
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v17, v9
+; GFX6-NEXT: v_max_i32_e32 v8, v10, v8
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v9
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_add_i32_e32 v7, vcc, v7, v8
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v4, 16, v4
+; GFX6-NEXT: v_ashrrev_i32_e32 v7, 16, v7
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_and_b32_e32 v4, s4, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v6, 16, v6
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v14, 0, v0
+; GFX8-NEXT: v_sub_u16_e32 v14, s5, v14
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v12, 0, v0
+; GFX8-NEXT: v_min_i16_e32 v16, 0, v8
+; GFX8-NEXT: v_sub_u16_e32 v12, s4, v12
+; GFX8-NEXT: v_max_i16_e32 v14, v14, v4
+; GFX8-NEXT: v_min_i16_e32 v12, v14, v12
+; GFX8-NEXT: v_max_i16_e32 v14, 0, v8
+; GFX8-NEXT: v_sub_u16_e32 v16, s5, v16
+; GFX8-NEXT: v_max_i16_sdwa v4, v16, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v16, 0, v1
+; GFX8-NEXT: v_sub_u16_e32 v14, s4, v14
+; GFX8-NEXT: v_min_i16_e32 v4, v4, v14
+; GFX8-NEXT: v_max_i16_e32 v14, 0, v1
+; GFX8-NEXT: v_sub_u16_e32 v16, s5, v16
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX8-NEXT: v_min_i16_e32 v17, 0, v9
+; GFX8-NEXT: v_sub_u16_e32 v14, s4, v14
+; GFX8-NEXT: v_max_i16_e32 v16, v16, v5
+; GFX8-NEXT: v_min_i16_e32 v14, v16, v14
+; GFX8-NEXT: v_max_i16_e32 v16, 0, v9
+; GFX8-NEXT: v_sub_u16_e32 v17, s5, v17
+; GFX8-NEXT: v_max_i16_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v16, s4, v16
+; GFX8-NEXT: v_mov_b32_e32 v15, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v17, 0, v2
+; GFX8-NEXT: v_sub_u16_e32 v17, v15, v17
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX8-NEXT: v_min_i16_e32 v5, v5, v16
+; GFX8-NEXT: v_mov_b32_e32 v13, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v16, 0, v2
+; GFX8-NEXT: v_min_i16_e32 v18, 0, v10
+; GFX8-NEXT: v_sub_u16_e32 v16, v13, v16
+; GFX8-NEXT: v_max_i16_e32 v17, v17, v6
+; GFX8-NEXT: v_min_i16_e32 v16, v17, v16
+; GFX8-NEXT: v_max_i16_e32 v17, 0, v10
+; GFX8-NEXT: v_sub_u16_e32 v18, v15, v18
+; GFX8-NEXT: v_max_i16_sdwa v6, v18, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v18, 0, v3
+; GFX8-NEXT: v_sub_u16_e32 v17, v13, v17
+; GFX8-NEXT: v_sub_u16_e32 v18, v15, v18
+; GFX8-NEXT: v_min_i16_e32 v6, v6, v17
+; GFX8-NEXT: v_max_i16_e32 v17, 0, v3
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX8-NEXT: v_sub_u16_e32 v17, v13, v17
+; GFX8-NEXT: v_max_i16_e32 v18, v18, v7
+; GFX8-NEXT: v_min_i16_e32 v17, v18, v17
+; GFX8-NEXT: v_max_i16_e32 v18, 0, v11
+; GFX8-NEXT: v_sub_u16_e32 v13, v13, v18
+; GFX8-NEXT: v_min_i16_e32 v18, 0, v11
+; GFX8-NEXT: v_sub_u16_e32 v15, v15, v18
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v12
+; GFX8-NEXT: v_add_u16_sdwa v4, v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_max_i16_sdwa v7, v15, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v14
+; GFX8-NEXT: v_add_u16_sdwa v4, v9, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
+; GFX8-NEXT: v_min_i16_e32 v7, v7, v13
+; GFX8-NEXT: v_add_u16_e32 v2, v2, v16
+; GFX8-NEXT: v_add_u16_sdwa v4, v10, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX8-NEXT: v_add_u16_e32 v3, v3, v17
+; GFX8-NEXT: v_add_u16_sdwa v4, v11, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, 0, 0
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v9, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v9, s5, v9
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v8, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v8, s4, v8
+; GFX9-NEXT: v_pk_max_i16 v4, v9, v4
+; GFX9-NEXT: v_pk_min_i16 v4, v4, v8
+; GFX9-NEXT: v_pk_min_i16 v8, v1, s6
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v4
+; GFX9-NEXT: v_pk_max_i16 v4, v1, s6
+; GFX9-NEXT: v_pk_sub_i16 v8, s5, v8
+; GFX9-NEXT: v_pk_sub_i16 v4, s4, v4
+; GFX9-NEXT: v_pk_max_i16 v5, v8, v5
+; GFX9-NEXT: v_pk_min_i16 v4, v5, v4
+; GFX9-NEXT: v_pk_min_i16 v5, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v5, s5, v5
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v4
+; GFX9-NEXT: v_pk_max_i16 v4, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, s4, v4
+; GFX9-NEXT: v_pk_max_i16 v5, v5, v6
+; GFX9-NEXT: v_pk_min_i16 v4, v5, v4
+; GFX9-NEXT: v_pk_min_i16 v5, v3, s6
+; GFX9-NEXT: v_pk_sub_i16 v5, s5, v5
+; GFX9-NEXT: v_pk_add_u16 v2, v2, v4
+; GFX9-NEXT: v_pk_max_i16 v4, v3, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, s4, v4
+; GFX9-NEXT: v_pk_max_i16 v5, v5, v7
+; GFX9-NEXT: v_pk_min_i16 v4, v5, v4
+; GFX9-NEXT: v_pk_add_u16 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, 0, 0
+; GFX10-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX10-NEXT: v_pk_min_i16 v8, v0, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX10-NEXT: v_pk_min_i16 v11, v1, s4
+; GFX10-NEXT: v_pk_min_i16 v12, v3, s4
+; GFX10-NEXT: v_pk_max_i16 v9, v0, s4
+; GFX10-NEXT: v_pk_sub_i16 v15, s5, v8
+; GFX10-NEXT: v_pk_min_i16 v8, v2, s4
+; GFX10-NEXT: v_pk_sub_i16 v11, s5, v11
+; GFX10-NEXT: v_pk_sub_i16 v12, s5, v12
+; GFX10-NEXT: v_pk_max_i16 v10, v1, s4
+; GFX10-NEXT: v_pk_max_i16 v13, v2, s4
+; GFX10-NEXT: v_pk_sub_i16 v8, s5, v8
+; GFX10-NEXT: v_pk_max_i16 v14, v3, s4
+; GFX10-NEXT: s_movk_i32 s6, 0x7fff
+; GFX10-NEXT: v_pk_max_i16 v4, v15, v4
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX10-NEXT: v_pk_max_i16 v5, v11, v5
+; GFX10-NEXT: v_pk_sub_i16 v9, s6, v9
+; GFX10-NEXT: v_pk_sub_i16 v10, s6, v10
+; GFX10-NEXT: v_pk_max_i16 v6, v8, v6
+; GFX10-NEXT: v_pk_sub_i16 v11, s6, v13
+; GFX10-NEXT: v_pk_sub_i16 v8, s6, v14
+; GFX10-NEXT: v_pk_max_i16 v7, v12, v7
+; GFX10-NEXT: v_pk_min_i16 v15, v4, v9
+; GFX10-NEXT: v_pk_min_i16 v19, v5, v10
+; GFX10-NEXT: v_pk_min_i16 v11, v6, v11
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_min_i16 v6, v7, v8
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v15
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v19
+; GFX10-NEXT: v_pk_add_u16 v2, v2, v11
+; GFX10-NEXT: v_pk_add_u16 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x i32> @s_saddsat_v8i16(<8 x i16> inreg %lhs, <8 x i16> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s8, s8, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, 0
+; GFX6-NEXT: s_brev_b32 s16, -2
+; GFX6-NEXT: s_cselect_b32 s18, s0, 0
+; GFX6-NEXT: s_sub_i32 s18, s16, s18
+; GFX6-NEXT: s_cmp_lt_i32 s0, 0
+; GFX6-NEXT: s_mov_b32 s17, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s19, s0, 0
+; GFX6-NEXT: s_sub_i32 s19, s17, s19
+; GFX6-NEXT: s_cmp_gt_i32 s19, s8
+; GFX6-NEXT: s_cselect_b32 s8, s19, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s18
+; GFX6-NEXT: s_cselect_b32 s8, s8, s18
+; GFX6-NEXT: s_add_i32 s0, s0, s8
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s8, s9, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s9, s1, 0
+; GFX6-NEXT: s_sub_i32 s9, s16, s9
+; GFX6-NEXT: s_cmp_lt_i32 s1, 0
+; GFX6-NEXT: s_cselect_b32 s18, s1, 0
+; GFX6-NEXT: s_sub_i32 s18, s17, s18
+; GFX6-NEXT: s_cmp_gt_i32 s18, s8
+; GFX6-NEXT: s_cselect_b32 s8, s18, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s9
+; GFX6-NEXT: s_cselect_b32 s8, s8, s9
+; GFX6-NEXT: s_add_i32 s1, s1, s8
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s8, s10, 16
+; GFX6-NEXT: s_cmp_gt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s9, s2, 0
+; GFX6-NEXT: s_sub_i32 s9, s16, s9
+; GFX6-NEXT: s_cmp_lt_i32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s10, s2, 0
+; GFX6-NEXT: s_sub_i32 s10, s17, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s8
+; GFX6-NEXT: s_cselect_b32 s8, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s9
+; GFX6-NEXT: s_cselect_b32 s8, s8, s9
+; GFX6-NEXT: s_add_i32 s2, s2, s8
+; GFX6-NEXT: s_ashr_i32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s8, s11, 16
+; GFX6-NEXT: s_cmp_gt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s9, s3, 0
+; GFX6-NEXT: s_sub_i32 s9, s16, s9
+; GFX6-NEXT: s_cmp_lt_i32 s3, 0
+; GFX6-NEXT: s_cselect_b32 s10, s3, 0
+; GFX6-NEXT: s_sub_i32 s10, s17, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s8
+; GFX6-NEXT: s_cselect_b32 s8, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s9
+; GFX6-NEXT: s_cselect_b32 s8, s8, s9
+; GFX6-NEXT: s_add_i32 s3, s3, s8
+; GFX6-NEXT: s_ashr_i32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s8, s12, 16
+; GFX6-NEXT: s_cmp_gt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s9, s4, 0
+; GFX6-NEXT: s_sub_i32 s9, s16, s9
+; GFX6-NEXT: s_cmp_lt_i32 s4, 0
+; GFX6-NEXT: s_cselect_b32 s10, s4, 0
+; GFX6-NEXT: s_sub_i32 s10, s17, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s8
+; GFX6-NEXT: s_cselect_b32 s8, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s9
+; GFX6-NEXT: s_cselect_b32 s8, s8, s9
+; GFX6-NEXT: s_add_i32 s4, s4, s8
+; GFX6-NEXT: s_ashr_i32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s8, s13, 16
+; GFX6-NEXT: s_cmp_gt_i32 s5, 0
+; GFX6-NEXT: s_cselect_b32 s9, s5, 0
+; GFX6-NEXT: s_sub_i32 s9, s16, s9
+; GFX6-NEXT: s_cmp_lt_i32 s5, 0
+; GFX6-NEXT: s_cselect_b32 s10, s5, 0
+; GFX6-NEXT: s_sub_i32 s10, s17, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s8
+; GFX6-NEXT: s_cselect_b32 s8, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s9
+; GFX6-NEXT: s_cselect_b32 s8, s8, s9
+; GFX6-NEXT: s_add_i32 s5, s5, s8
+; GFX6-NEXT: s_ashr_i32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_lshl_b32 s8, s14, 16
+; GFX6-NEXT: s_cmp_gt_i32 s6, 0
+; GFX6-NEXT: s_cselect_b32 s9, s6, 0
+; GFX6-NEXT: s_sub_i32 s9, s16, s9
+; GFX6-NEXT: s_cmp_lt_i32 s6, 0
+; GFX6-NEXT: s_cselect_b32 s10, s6, 0
+; GFX6-NEXT: s_sub_i32 s10, s17, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s8
+; GFX6-NEXT: s_cselect_b32 s8, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s9
+; GFX6-NEXT: s_cselect_b32 s8, s8, s9
+; GFX6-NEXT: s_add_i32 s6, s6, s8
+; GFX6-NEXT: s_ashr_i32 s6, s6, 16
+; GFX6-NEXT: s_lshl_b32 s7, s7, 16
+; GFX6-NEXT: s_lshl_b32 s8, s15, 16
+; GFX6-NEXT: s_cmp_gt_i32 s7, 0
+; GFX6-NEXT: s_cselect_b32 s9, s7, 0
+; GFX6-NEXT: s_sub_i32 s9, s16, s9
+; GFX6-NEXT: s_cmp_lt_i32 s7, 0
+; GFX6-NEXT: s_cselect_b32 s10, s7, 0
+; GFX6-NEXT: s_sub_i32 s10, s17, s10
+; GFX6-NEXT: s_cmp_gt_i32 s10, s8
+; GFX6-NEXT: s_cselect_b32 s8, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s9
+; GFX6-NEXT: s_cselect_b32 s8, s8, s9
+; GFX6-NEXT: s_add_i32 s7, s7, s8
+; GFX6-NEXT: s_mov_b32 s8, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s8
+; GFX6-NEXT: s_and_b32 s0, s0, s8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s8
+; GFX6-NEXT: s_and_b32 s2, s3, s8
+; GFX6-NEXT: s_and_b32 s3, s5, s8
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_ashr_i32 s7, s7, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s8
+; GFX6-NEXT: s_and_b32 s4, s7, s8
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: s_and_b32 s3, s6, s8
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_or_b32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s12, s4, 16
+; GFX8-NEXT: s_lshr_b32 s13, s5, 16
+; GFX8-NEXT: s_lshr_b32 s14, s6, 16
+; GFX8-NEXT: s_lshr_b32 s8, s0, 16
+; GFX8-NEXT: s_lshr_b32 s9, s1, 16
+; GFX8-NEXT: s_lshr_b32 s10, s2, 16
+; GFX8-NEXT: s_lshr_b32 s11, s3, 16
+; GFX8-NEXT: s_lshr_b32 s15, s7, 16
+; GFX8-NEXT: s_sext_i32_i16 s18, s0
+; GFX8-NEXT: s_sext_i32_i16 s19, 0
+; GFX8-NEXT: s_cmp_gt_i32 s18, s19
+; GFX8-NEXT: s_movk_i32 s16, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s20, s18, s19
+; GFX8-NEXT: s_sub_i32 s20, s16, s20
+; GFX8-NEXT: s_cmp_lt_i32 s18, s19
+; GFX8-NEXT: s_mov_b32 s17, 0x8000
+; GFX8-NEXT: s_cselect_b32 s18, s18, s19
+; GFX8-NEXT: s_sub_i32 s18, s17, s18
+; GFX8-NEXT: s_sext_i32_i16 s18, s18
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_gt_i32 s18, s4
+; GFX8-NEXT: s_cselect_b32 s4, s18, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s18, s20
+; GFX8-NEXT: s_cmp_lt_i32 s4, s18
+; GFX8-NEXT: s_cselect_b32 s4, s4, s18
+; GFX8-NEXT: s_add_i32 s0, s0, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s8
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s18, s4, s19
+; GFX8-NEXT: s_sub_i32 s18, s16, s18
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s17, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s12, s12
+; GFX8-NEXT: s_cmp_gt_i32 s4, s12
+; GFX8-NEXT: s_cselect_b32 s4, s4, s12
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s12, s18
+; GFX8-NEXT: s_cmp_lt_i32 s4, s12
+; GFX8-NEXT: s_cselect_b32 s4, s4, s12
+; GFX8-NEXT: s_add_i32 s8, s8, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s1
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s12, s4, s19
+; GFX8-NEXT: s_sub_i32 s12, s16, s12
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s17, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_gt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s12
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s1, s1, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s9
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s5, s4, s19
+; GFX8-NEXT: s_sub_i32 s5, s16, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s17, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s12, s13
+; GFX8-NEXT: s_cmp_gt_i32 s4, s12
+; GFX8-NEXT: s_cselect_b32 s4, s4, s12
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s9, s9, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s2
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s5, s4, s19
+; GFX8-NEXT: s_sub_i32 s5, s16, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s17, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_gt_i32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s2, s2, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s10
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s5, s4, s19
+; GFX8-NEXT: s_sub_i32 s5, s16, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s17, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s6, s14
+; GFX8-NEXT: s_cmp_gt_i32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s10, s10, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s3
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s5, s4, s19
+; GFX8-NEXT: s_sub_i32 s5, s16, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s17, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s6, s7
+; GFX8-NEXT: s_cmp_gt_i32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s3, s3, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s11
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s5, s4, s19
+; GFX8-NEXT: s_sub_i32 s5, s16, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s17, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s6, s15
+; GFX8-NEXT: s_cmp_gt_i32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s11, s11, s4
+; GFX8-NEXT: s_bfe_u32 s4, s8, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s4, s9, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s4
+; GFX8-NEXT: s_bfe_u32 s4, s10, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s4
+; GFX8-NEXT: s_bfe_u32 s4, s11, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, 0, 0
+; GFX9-NEXT: s_sext_i32_i16 s13, s10
+; GFX9-NEXT: s_sext_i32_i16 s11, s0
+; GFX9-NEXT: s_ashr_i32 s12, s0, 16
+; GFX9-NEXT: s_ashr_i32 s10, s10, 16
+; GFX9-NEXT: s_cmp_gt_i32 s11, s13
+; GFX9-NEXT: s_cselect_b32 s14, s11, s13
+; GFX9-NEXT: s_cmp_gt_i32 s12, s10
+; GFX9-NEXT: s_movk_i32 s8, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s15, s12, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s8
+; GFX9-NEXT: s_lshr_b32 s16, s14, 16
+; GFX9-NEXT: s_lshr_b32 s15, s8, 16
+; GFX9-NEXT: s_sub_i32 s14, s8, s14
+; GFX9-NEXT: s_sub_i32 s16, s15, s16
+; GFX9-NEXT: s_cmp_lt_i32 s11, s13
+; GFX9-NEXT: s_cselect_b32 s11, s11, s13
+; GFX9-NEXT: s_cmp_lt_i32 s12, s10
+; GFX9-NEXT: s_mov_b32 s9, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s12, s12, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s16
+; GFX9-NEXT: s_lshr_b32 s16, s11, 16
+; GFX9-NEXT: s_lshr_b32 s12, s9, 16
+; GFX9-NEXT: s_sub_i32 s11, s9, s11
+; GFX9-NEXT: s_sub_i32 s16, s12, s16
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s16
+; GFX9-NEXT: s_sext_i32_i16 s16, s11
+; GFX9-NEXT: s_sext_i32_i16 s17, s4
+; GFX9-NEXT: s_ashr_i32 s11, s11, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_gt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_cmp_gt_i32 s11, s4
+; GFX9-NEXT: s_cselect_b32 s4, s11, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s16, s4
+; GFX9-NEXT: s_sext_i32_i16 s11, s4
+; GFX9-NEXT: s_sext_i32_i16 s16, s14
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s14, s14, 16
+; GFX9-NEXT: s_cmp_lt_i32 s11, s16
+; GFX9-NEXT: s_cselect_b32 s11, s11, s16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s14
+; GFX9-NEXT: s_cselect_b32 s4, s4, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s11, s4
+; GFX9-NEXT: s_lshr_b32 s11, s0, 16
+; GFX9-NEXT: s_lshr_b32 s14, s4, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s4
+; GFX9-NEXT: s_add_i32 s11, s11, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s11
+; GFX9-NEXT: s_sext_i32_i16 s4, s1
+; GFX9-NEXT: s_ashr_i32 s11, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s14, s4, s13
+; GFX9-NEXT: s_cmp_gt_i32 s11, s10
+; GFX9-NEXT: s_cselect_b32 s16, s11, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s16
+; GFX9-NEXT: s_lshr_b32 s16, s14, 16
+; GFX9-NEXT: s_sub_i32 s14, s8, s14
+; GFX9-NEXT: s_sub_i32 s16, s15, s16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s4, s4, s13
+; GFX9-NEXT: s_cmp_lt_i32 s11, s10
+; GFX9-NEXT: s_cselect_b32 s11, s11, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s11
+; GFX9-NEXT: s_lshr_b32 s11, s4, 16
+; GFX9-NEXT: s_sub_i32 s4, s9, s4
+; GFX9-NEXT: s_sub_i32 s11, s12, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s16
+; GFX9-NEXT: s_sext_i32_i16 s11, s4
+; GFX9-NEXT: s_sext_i32_i16 s16, s5
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s5, s5, 16
+; GFX9-NEXT: s_cmp_gt_i32 s11, s16
+; GFX9-NEXT: s_cselect_b32 s11, s11, s16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s11, s4
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s11, s14
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s14, s14, 16
+; GFX9-NEXT: s_cmp_lt_i32 s5, s11
+; GFX9-NEXT: s_cselect_b32 s5, s5, s11
+; GFX9-NEXT: s_cmp_lt_i32 s4, s14
+; GFX9-NEXT: s_cselect_b32 s4, s4, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX9-NEXT: s_lshr_b32 s5, s1, 16
+; GFX9-NEXT: s_lshr_b32 s11, s4, 16
+; GFX9-NEXT: s_add_i32 s1, s1, s4
+; GFX9-NEXT: s_add_i32 s5, s5, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5
+; GFX9-NEXT: s_sext_i32_i16 s4, s2
+; GFX9-NEXT: s_ashr_i32 s5, s2, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s11, s4, s13
+; GFX9-NEXT: s_cmp_gt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s14, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s14
+; GFX9-NEXT: s_lshr_b32 s14, s11, 16
+; GFX9-NEXT: s_sub_i32 s11, s8, s11
+; GFX9-NEXT: s_sub_i32 s14, s15, s14
+; GFX9-NEXT: s_cmp_lt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s4, s4, s13
+; GFX9-NEXT: s_cmp_lt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s5, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_sub_i32 s4, s9, s4
+; GFX9-NEXT: s_sub_i32 s5, s12, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s14
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s14, s6
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s14
+; GFX9-NEXT: s_cselect_b32 s5, s5, s14
+; GFX9-NEXT: s_cmp_gt_i32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s6, s11
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s11, s11, 16
+; GFX9-NEXT: s_cmp_lt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_cmp_lt_i32 s4, s11
+; GFX9-NEXT: s_cselect_b32 s4, s4, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX9-NEXT: s_lshr_b32 s5, s2, 16
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_add_i32 s2, s2, s4
+; GFX9-NEXT: s_add_i32 s5, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s5
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_ashr_i32 s5, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s6, s4, s13
+; GFX9-NEXT: s_cmp_gt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s11, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s11
+; GFX9-NEXT: s_lshr_b32 s11, s6, 16
+; GFX9-NEXT: s_sub_i32 s6, s8, s6
+; GFX9-NEXT: s_sub_i32 s8, s15, s11
+; GFX9-NEXT: s_cmp_lt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s4, s4, s13
+; GFX9-NEXT: s_cmp_lt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s5, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_sub_i32 s4, s9, s4
+; GFX9-NEXT: s_sub_i32 s5, s12, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s8, s7
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s7, s7, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s8
+; GFX9-NEXT: s_cselect_b32 s5, s5, s8
+; GFX9-NEXT: s_cmp_gt_i32 s4, s7
+; GFX9-NEXT: s_cselect_b32 s4, s4, s7
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s7, s6
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_cmp_lt_i32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_i32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: s_add_i32 s5, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s5
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, 0, 0
+; GFX10-NEXT: s_sext_i32_i16 s9, s0
+; GFX10-NEXT: s_sext_i32_i16 s11, s8
+; GFX10-NEXT: s_ashr_i32 s10, s0, 16
+; GFX10-NEXT: s_ashr_i32 s8, s8, 16
+; GFX10-NEXT: s_cmp_gt_i32 s9, s11
+; GFX10-NEXT: s_movk_i32 s13, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s12, s9, s11
+; GFX10-NEXT: s_cmp_gt_i32 s10, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s13, s13, s13
+; GFX10-NEXT: s_cselect_b32 s14, s10, s8
+; GFX10-NEXT: s_mov_b32 s16, 0xffff8000
+; GFX10-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX10-NEXT: s_lshr_b32 s14, s13, 16
+; GFX10-NEXT: s_lshr_b32 s15, s12, 16
+; GFX10-NEXT: s_sub_i32 s12, s13, s12
+; GFX10-NEXT: s_sub_i32 s15, s14, s15
+; GFX10-NEXT: s_cmp_lt_i32 s9, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s16, s16, s16
+; GFX10-NEXT: s_cselect_b32 s9, s9, s11
+; GFX10-NEXT: s_cmp_lt_i32 s10, s8
+; GFX10-NEXT: s_sext_i32_i16 s18, s4
+; GFX10-NEXT: s_cselect_b32 s10, s10, s8
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX10-NEXT: s_lshr_b32 s10, s16, 16
+; GFX10-NEXT: s_lshr_b32 s17, s9, 16
+; GFX10-NEXT: s_sub_i32 s9, s16, s9
+; GFX10-NEXT: s_sub_i32 s17, s10, s17
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s17
+; GFX10-NEXT: s_sext_i32_i16 s17, s9
+; GFX10-NEXT: s_ashr_i32 s9, s9, 16
+; GFX10-NEXT: s_cmp_gt_i32 s17, s18
+; GFX10-NEXT: s_cselect_b32 s17, s17, s18
+; GFX10-NEXT: s_cmp_gt_i32 s9, s4
+; GFX10-NEXT: s_sext_i32_i16 s18, s5
+; GFX10-NEXT: s_cselect_b32 s4, s9, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s12, s15
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s17, s4
+; GFX10-NEXT: s_sext_i32_i16 s15, s9
+; GFX10-NEXT: s_sext_i32_i16 s12, s4
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_ashr_i32 s9, s9, 16
+; GFX10-NEXT: s_cmp_lt_i32 s12, s15
+; GFX10-NEXT: s_cselect_b32 s12, s12, s15
+; GFX10-NEXT: s_cmp_lt_i32 s4, s9
+; GFX10-NEXT: s_cselect_b32 s4, s4, s9
+; GFX10-NEXT: s_lshr_b32 s9, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s12, s4
+; GFX10-NEXT: s_lshr_b32 s12, s4, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s4
+; GFX10-NEXT: s_sext_i32_i16 s4, s1
+; GFX10-NEXT: s_add_i32 s9, s9, s12
+; GFX10-NEXT: s_ashr_i32 s12, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s4, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s9
+; GFX10-NEXT: s_cselect_b32 s15, s4, s11
+; GFX10-NEXT: s_cmp_gt_i32 s12, s8
+; GFX10-NEXT: s_cselect_b32 s17, s12, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s15, s15, s17
+; GFX10-NEXT: s_lshr_b32 s17, s15, 16
+; GFX10-NEXT: s_sub_i32 s15, s13, s15
+; GFX10-NEXT: s_sub_i32 s17, s14, s17
+; GFX10-NEXT: s_cmp_lt_i32 s4, s11
+; GFX10-NEXT: s_cselect_b32 s4, s4, s11
+; GFX10-NEXT: s_cmp_lt_i32 s12, s8
+; GFX10-NEXT: s_cselect_b32 s12, s12, s8
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s12
+; GFX10-NEXT: s_lshr_b32 s12, s4, 16
+; GFX10-NEXT: s_sub_i32 s4, s16, s4
+; GFX10-NEXT: s_sub_i32 s12, s10, s12
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s12
+; GFX10-NEXT: s_sext_i32_i16 s12, s4
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_gt_i32 s12, s18
+; GFX10-NEXT: s_cselect_b32 s12, s12, s18
+; GFX10-NEXT: s_cmp_gt_i32 s4, s5
+; GFX10-NEXT: s_sext_i32_i16 s18, s6
+; GFX10-NEXT: s_cselect_b32 s4, s4, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s15, s17
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s12, s4
+; GFX10-NEXT: s_sext_i32_i16 s15, s5
+; GFX10-NEXT: s_sext_i32_i16 s12, s4
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_cmp_lt_i32 s12, s15
+; GFX10-NEXT: s_cselect_b32 s12, s12, s15
+; GFX10-NEXT: s_cmp_lt_i32 s4, s5
+; GFX10-NEXT: s_cselect_b32 s4, s4, s5
+; GFX10-NEXT: s_lshr_b32 s5, s1, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s12, s4
+; GFX10-NEXT: s_lshr_b32 s12, s4, 16
+; GFX10-NEXT: s_add_i32 s1, s1, s4
+; GFX10-NEXT: s_sext_i32_i16 s4, s2
+; GFX10-NEXT: s_add_i32 s5, s5, s12
+; GFX10-NEXT: s_ashr_i32 s12, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s4, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5
+; GFX10-NEXT: s_cselect_b32 s15, s4, s11
+; GFX10-NEXT: s_cmp_gt_i32 s12, s8
+; GFX10-NEXT: s_cselect_b32 s17, s12, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s15, s15, s17
+; GFX10-NEXT: s_lshr_b32 s17, s15, 16
+; GFX10-NEXT: s_sub_i32 s15, s13, s15
+; GFX10-NEXT: s_sub_i32 s17, s14, s17
+; GFX10-NEXT: s_cmp_lt_i32 s4, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s15, s15, s17
+; GFX10-NEXT: s_cselect_b32 s4, s4, s11
+; GFX10-NEXT: s_cmp_lt_i32 s12, s8
+; GFX10-NEXT: s_cselect_b32 s12, s12, s8
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s12
+; GFX10-NEXT: s_lshr_b32 s12, s4, 16
+; GFX10-NEXT: s_sub_i32 s4, s16, s4
+; GFX10-NEXT: s_sub_i32 s12, s10, s12
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s12
+; GFX10-NEXT: s_sext_i32_i16 s12, s4
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_gt_i32 s12, s18
+; GFX10-NEXT: s_cselect_b32 s12, s12, s18
+; GFX10-NEXT: s_cmp_gt_i32 s4, s6
+; GFX10-NEXT: s_cselect_b32 s4, s4, s6
+; GFX10-NEXT: s_sext_i32_i16 s6, s15
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s12, s4
+; GFX10-NEXT: s_ashr_i32 s12, s15, 16
+; GFX10-NEXT: s_sext_i32_i16 s15, s4
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_lt_i32 s15, s6
+; GFX10-NEXT: s_cselect_b32 s6, s15, s6
+; GFX10-NEXT: s_cmp_lt_i32 s4, s12
+; GFX10-NEXT: s_sext_i32_i16 s15, s3
+; GFX10-NEXT: s_cselect_b32 s4, s4, s12
+; GFX10-NEXT: s_lshr_b32 s12, s2, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s6, s4
+; GFX10-NEXT: s_ashr_i32 s6, s3, 16
+; GFX10-NEXT: s_lshr_b32 s17, s4, 16
+; GFX10-NEXT: s_add_i32 s2, s2, s4
+; GFX10-NEXT: s_add_i32 s12, s12, s17
+; GFX10-NEXT: s_cmp_gt_i32 s15, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s12
+; GFX10-NEXT: s_cselect_b32 s4, s15, s11
+; GFX10-NEXT: s_cmp_gt_i32 s6, s8
+; GFX10-NEXT: s_cselect_b32 s17, s6, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s17
+; GFX10-NEXT: s_lshr_b32 s17, s4, 16
+; GFX10-NEXT: s_sub_i32 s4, s13, s4
+; GFX10-NEXT: s_sub_i32 s13, s14, s17
+; GFX10-NEXT: s_cmp_lt_i32 s15, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s13
+; GFX10-NEXT: s_cselect_b32 s11, s15, s11
+; GFX10-NEXT: s_cmp_lt_i32 s6, s8
+; GFX10-NEXT: s_cselect_b32 s6, s6, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s11, s6
+; GFX10-NEXT: s_lshr_b32 s8, s6, 16
+; GFX10-NEXT: s_sub_i32 s6, s16, s6
+; GFX10-NEXT: s_sub_i32 s8, s10, s8
+; GFX10-NEXT: s_sext_i32_i16 s10, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_sext_i32_i16 s8, s6
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_cmp_gt_i32 s8, s10
+; GFX10-NEXT: s_cselect_b32 s8, s8, s10
+; GFX10-NEXT: s_cmp_gt_i32 s6, s7
+; GFX10-NEXT: s_cselect_b32 s6, s6, s7
+; GFX10-NEXT: s_sext_i32_i16 s7, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s8, s6
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_sext_i32_i16 s8, s6
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_cmp_lt_i32 s8, s7
+; GFX10-NEXT: s_cselect_b32 s7, s8, s7
+; GFX10-NEXT: s_cmp_lt_i32 s6, s4
+; GFX10-NEXT: s_cselect_b32 s4, s6, s4
+; GFX10-NEXT: s_lshr_b32 s5, s3, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s7, s4
+; GFX10-NEXT: s_lshr_b32 s6, s4, 16
+; GFX10-NEXT: s_add_i32 s3, s3, s4
+; GFX10-NEXT: s_add_i32 s5, s5, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s5
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x i32>
+ ret <4 x i32> %cast
+}
+
+; FIXME: i48 broken because i48 add broken
+; define i48 @v_saddsat_i48(i48 %lhs, i48 %rhs) {
+; %result = call i48 @llvm.sadd.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps i48 @s_saddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.sadd.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps <2 x float> @saddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
+; %result = call i48 @llvm.sadd.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+; define amdgpu_ps <2 x float> @saddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.sadd.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+define i64 @v_saddsat_i64(i64 %lhs, i64 %rhs) {
+; GFX6-LABEL: v_saddsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v0, v2
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v5
+; GFX6-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX6-NEXT: v_add_i32_e64 v2, s[6:7], 0, v0
+; GFX6-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v5
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_add_u32_e64 v2, s[6:7], 0, v0
+; GFX8-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[2:3]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
+; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX9-NEXT: v_add_co_u32_e64 v2, s[6:7], 0, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_add_co_u32_e64 v10, vcc_lo, v0, v2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[2:3]
+; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v11
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[10:11], v[0:1]
+; GFX10-NEXT: v_add_co_u32_e64 v0, s5, v6, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s5, 0x80000000, v6, s5
+; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v11, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
+
+define amdgpu_ps i64 @s_saddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: s_saddsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s4, s0, s2
+; GFX6-NEXT: s_cselect_b32 s5, 1, 0
+; GFX6-NEXT: s_and_b32 s5, s5, 1
+; GFX6-NEXT: s_cmp_lg_u32 s5, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: s_addc_u32 s5, s1, s3
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX6-NEXT: s_ashr_i32 s2, s5, 31
+; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX6-NEXT: s_add_u32 s0, s2, 0
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s4, s0, s2
+; GFX8-NEXT: s_cselect_b32 s5, 1, 0
+; GFX8-NEXT: s_and_b32 s5, s5, 1
+; GFX8-NEXT: s_cmp_lg_u32 s5, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: s_addc_u32 s5, s1, s3
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX8-NEXT: s_ashr_i32 s2, s5, 31
+; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX8-NEXT: s_add_u32 s0, s2, 0
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s4, s0, s2
+; GFX9-NEXT: s_cselect_b32 s5, 1, 0
+; GFX9-NEXT: s_and_b32 s5, s5, 1
+; GFX9-NEXT: s_cmp_lg_u32 s5, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: s_addc_u32 s5, s1, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX9-NEXT: s_ashr_i32 s2, s5, 31
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX9-NEXT: s_add_u32 s0, s2, 0
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s4, s0, s2
+; GFX10-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: s_and_b32 s5, s5, 1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lg_u32 s5, 0
+; GFX10-NEXT: s_addc_u32 s5, s1, s3
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[2:3], 0
+; GFX10-NEXT: s_ashr_i32 s2, s5, 31
+; GFX10-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-NEXT: s_xor_b32 s3, s1, s0
+; GFX10-NEXT: s_add_u32 s0, s2, 0
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s3
+; GFX10-NEXT: s_and_b32 s1, s1, 1
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s3
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
+
+define amdgpu_ps <2 x float> @saddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
+; GFX6-LABEL: saddsat_i64_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
+; GFX6-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], 0, v[0:1]
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX6-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX6-NEXT: v_add_i32_e64 v4, s[2:3], 0, v0
+; GFX6-NEXT: v_addc_u32_e64 v1, s[2:3], v0, v1, s[2:3]
+; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i64_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], 0, v[0:1]
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_add_u32_e64 v4, s[2:3], 0, v0
+; GFX8-NEXT: v_addc_u32_e64 v1, s[2:3], v0, v1, s[2:3]
+; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i64_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], 0, v[0:1]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], 0, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[2:3], v0, v1, s[2:3]
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i64_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[0:1]
+; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], v[2:3]
+; GFX10-NEXT: v_add_co_u32_e64 v0, s1, v4, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s1, 0x80000000, v4, s1
+; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s0
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @saddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: saddsat_i64_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[2:3], s[0:1], 0
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX6-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX6-NEXT: v_add_i32_e64 v4, s[0:1], 0, v0
+; GFX6-NEXT: v_addc_u32_e64 v1, s[0:1], v0, v1, s[0:1]
+; GFX6-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i64_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[2:3], s[0:1], 0
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_add_u32_e64 v4, s[0:1], 0, v0
+; GFX8-NEXT: v_addc_u32_e64 v1, s[0:1], v0, v1, s[0:1]
+; GFX8-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i64_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], s[0:1], 0
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[0:1], 0, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[0:1], v0, v1, s[0:1]
+; GFX9-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i64_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v0, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[0:1], 0
+; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_add_co_u32_e64 v0, s0, v4, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, 0x80000000, v4, s0
+; GFX10-NEXT: s_xor_b32 vcc_lo, s1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.sadd.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define <2 x i64> @v_saddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; GFX6-LABEL: v_saddsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_add_i32_e32 v8, vcc, v0, v4
+; GFX6-NEXT: v_addc_u32_e32 v9, vcc, v1, v5, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[4:5]
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v9
+; GFX6-NEXT: s_brev_b32 s8, 1
+; GFX6-NEXT: v_mov_b32_e32 v1, s8
+; GFX6-NEXT: v_add_i32_e64 v4, s[6:7], 0, v0
+; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX6-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v8, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v2, v6
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v3, v7, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[6:7]
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v5
+; GFX6-NEXT: v_mov_b32_e32 v3, s8
+; GFX6-NEXT: v_add_i32_e64 v6, s[6:7], 0, v2
+; GFX6-NEXT: v_addc_u32_e64 v3, s[6:7], v2, v3, s[6:7]
+; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v0, v4
+; GFX8-NEXT: v_addc_u32_e32 v9, vcc, v1, v5, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[4:5]
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v9
+; GFX8-NEXT: s_brev_b32 s8, 1
+; GFX8-NEXT: v_mov_b32_e32 v1, s8
+; GFX8-NEXT: v_add_u32_e64 v4, s[6:7], 0, v0
+; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX8-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v2, v6
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v3, v7, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[6:7]
+; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v5
+; GFX8-NEXT: v_mov_b32_e32 v3, s8
+; GFX8-NEXT: v_add_u32_e64 v6, s[6:7], 0, v2
+; GFX8-NEXT: v_addc_u32_e64 v3, s[6:7], v2, v3, s[6:7]
+; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v1, v5, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[4:5]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v9
+; GFX9-NEXT: s_brev_b32 s8, 1
+; GFX9-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[6:7], 0, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v2, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v3, v7, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], 0, v[6:7]
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v5
+; GFX9-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-NEXT: v_add_co_u32_e64 v6, s[6:7], 0, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[6:7], v2, v3, s[6:7]
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_mov_b32_e32 v10, v1
+; GFX10-NEXT: v_mov_b32_e32 v13, v2
+; GFX10-NEXT: v_mov_b32_e32 v14, v3
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[4:5]
+; GFX10-NEXT: v_add_co_u32_e64 v19, vcc_lo, v9, v4
+; GFX10-NEXT: s_brev_b32 s8, 1
+; GFX10-NEXT: v_add_co_ci_u32_e32 v20, vcc_lo, v10, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32_e64 v23, vcc_lo, v13, v6
+; GFX10-NEXT: v_cmp_gt_i64_e64 s6, 0, v[6:7]
+; GFX10-NEXT: v_add_co_ci_u32_e32 v24, vcc_lo, v14, v7, vcc_lo
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_ashrrev_i32_e32 v0, 31, v20
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[19:20], v[9:10]
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v24
+; GFX10-NEXT: v_add_co_u32_e64 v4, s5, v0, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s5, s8, v0, s5
+; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[23:24], v[13:14]
+; GFX10-NEXT: v_add_co_u32_e64 v2, s7, v1, 0
+; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s7, s8, v1, s7
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v19, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v20, v5, vcc_lo
+; GFX10-NEXT: s_xor_b32 vcc_lo, s6, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v23, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v24, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
+
+define amdgpu_ps <2 x i64> @s_saddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s8, s0, s4
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: s_and_b32 s9, s9, 1
+; GFX6-NEXT: s_cmp_lg_u32 s9, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: s_addc_u32 s9, s1, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
+; GFX6-NEXT: s_ashr_i32 s4, s9, 31
+; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX6-NEXT: s_add_u32 s0, s4, 0
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_brev_b32 s5, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: s_addc_u32 s1, s4, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: s_add_u32 s0, s2, s6
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: v_mov_b32_e32 v0, s8
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_addc_u32 s1, s3, s7
+; GFX6-NEXT: v_mov_b32_e32 v3, s9
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
+; GFX6-NEXT: s_ashr_i32 s4, s1, 31
+; GFX6-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: s_add_u32 s0, s4, 0
+; GFX6-NEXT: s_cselect_b32 s2, 1, 0
+; GFX6-NEXT: s_and_b32 s2, s2, 1
+; GFX6-NEXT: s_cmp_lg_u32 s2, 0
+; GFX6-NEXT: s_addc_u32 s3, s4, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v4
+; GFX6-NEXT: v_readfirstlane_b32 s1, v2
+; GFX6-NEXT: v_readfirstlane_b32 s2, v0
+; GFX6-NEXT: v_readfirstlane_b32 s3, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s8, s0, s4
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: s_and_b32 s9, s9, 1
+; GFX8-NEXT: s_cmp_lg_u32 s9, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: s_addc_u32 s9, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
+; GFX8-NEXT: s_ashr_i32 s4, s9, 31
+; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX8-NEXT: s_add_u32 s0, s4, 0
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_brev_b32 s5, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: s_addc_u32 s1, s4, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: s_add_u32 s0, s2, s6
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: v_mov_b32_e32 v0, s8
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: s_addc_u32 s1, s3, s7
+; GFX8-NEXT: v_mov_b32_e32 v3, s9
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
+; GFX8-NEXT: s_ashr_i32 s4, s1, 31
+; GFX8-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: s_add_u32 s0, s4, 0
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_and_b32 s2, s2, 1
+; GFX8-NEXT: s_cmp_lg_u32 s2, 0
+; GFX8-NEXT: s_addc_u32 s3, s4, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v4
+; GFX8-NEXT: v_readfirstlane_b32 s1, v2
+; GFX8-NEXT: v_readfirstlane_b32 s2, v0
+; GFX8-NEXT: v_readfirstlane_b32 s3, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s8, s0, s4
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: s_and_b32 s9, s9, 1
+; GFX9-NEXT: s_cmp_lg_u32 s9, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: s_addc_u32 s9, s1, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[4:5], 0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
+; GFX9-NEXT: s_ashr_i32 s4, s9, 31
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX9-NEXT: s_add_u32 s0, s4, 0
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_brev_b32 s5, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_addc_u32 s1, s4, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_add_u32 s0, s2, s6
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: s_addc_u32 s1, s3, s7
+; GFX9-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[2:3], s[6:7], 0
+; GFX9-NEXT: s_ashr_i32 s4, s1, 31
+; GFX9-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: s_add_u32 s0, s4, 0
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_and_b32 s2, s2, 1
+; GFX9-NEXT: s_cmp_lg_u32 s2, 0
+; GFX9-NEXT: s_addc_u32 s3, s4, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v4
+; GFX9-NEXT: v_readfirstlane_b32 s1, v2
+; GFX9-NEXT: v_readfirstlane_b32 s2, v0
+; GFX9-NEXT: v_readfirstlane_b32 s3, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s8, s0, s4
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, s[4:5], 0
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: v_mov_b32_e32 v0, s8
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_brev_b32 s10, 1
+; GFX10-NEXT: s_addc_u32 s9, s1, s5
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
+; GFX10-NEXT: s_ashr_i32 s1, s9, 31
+; GFX10-NEXT: v_mov_b32_e32 v1, s9
+; GFX10-NEXT: s_xor_b32 s8, s4, s0
+; GFX10-NEXT: s_add_u32 s0, s1, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s8
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s10
+; GFX10-NEXT: s_add_u32 s4, s2, s6
+; GFX10-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s8
+; GFX10-NEXT: s_and_b32 s5, s5, 1
+; GFX10-NEXT: v_mov_b32_e32 v2, s4
+; GFX10-NEXT: s_cmp_lg_u32 s5, 0
+; GFX10-NEXT: s_addc_u32 s5, s3, s7
+; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[4:5], s[2:3]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s3, s[6:7], 0
+; GFX10-NEXT: s_ashr_i32 s1, s5, 31
+; GFX10-NEXT: v_mov_b32_e32 v3, s5
+; GFX10-NEXT: s_xor_b32 s2, s3, s2
+; GFX10-NEXT: s_add_u32 s0, s1, 0
+; GFX10-NEXT: s_cselect_b32 s3, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s0, s2
+; GFX10-NEXT: s_and_b32 s3, s3, 1
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: s_cmp_lg_u32 s3, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s10
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s1, s2
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
+
+define amdgpu_ps i128 @s_saddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: s_saddsat_i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s8, s0, s4
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: s_and_b32 s9, s9, 1
+; GFX6-NEXT: s_cmp_lg_u32 s9, 0
+; GFX6-NEXT: s_addc_u32 s9, s1, s5
+; GFX6-NEXT: s_cselect_b32 s10, 1, 0
+; GFX6-NEXT: s_and_b32 s10, s10, 1
+; GFX6-NEXT: s_cmp_lg_u32 s10, 0
+; GFX6-NEXT: s_addc_u32 s10, s2, s6
+; GFX6-NEXT: s_cselect_b32 s11, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: s_and_b32 s11, s11, 1
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: s_cmp_lg_u32 s11, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_addc_u32 s11, s3, s7
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[10:11], v[0:1]
+; GFX6-NEXT: v_cmp_lt_u64_e64 s[0:1], s[4:5], 0
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[10:11], v[0:1]
+; GFX6-NEXT: s_movk_i32 s2, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[6:7], 0
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[6:7], 0
+; GFX6-NEXT: s_sub_i32 s6, s2, 64
+; GFX6-NEXT: s_sub_i32 s4, 64, s2
+; GFX6-NEXT: s_cmp_lt_u32 s2, 64
+; GFX6-NEXT: s_cselect_b32 s12, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s13, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX6-NEXT: s_ashr_i64 s[0:1], s[10:11], s2
+; GFX6-NEXT: s_lshr_b64 s[2:3], s[8:9], s2
+; GFX6-NEXT: s_lshl_b64 s[4:5], s[10:11], s4
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX6-NEXT: s_ashr_i32 s4, s11, 31
+; GFX6-NEXT: s_ashr_i64 s[6:7], s[10:11], s6
+; GFX6-NEXT: s_and_b32 s12, s12, 1
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[6:7]
+; GFX6-NEXT: s_and_b32 s6, s13, 1
+; GFX6-NEXT: s_cmp_lg_u32 s6, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX6-NEXT: s_mov_b32 s5, s4
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], s[4:5]
+; GFX6-NEXT: s_add_u32 s2, s2, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: s_and_b32 s4, s4, 1
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: s_addc_u32 s3, s3, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: s_and_b32 s4, s4, 1
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: s_addc_u32 s0, s0, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX6-NEXT: s_and_b32 s4, s4, 1
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_addc_u32 s1, s1, 0x80000000
+; GFX6-NEXT: v_mov_b32_e32 v1, s2
+; GFX6-NEXT: v_mov_b32_e32 v2, s3
+; GFX6-NEXT: v_mov_b32_e32 v3, s8
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v4, s9
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_mov_b32_e32 v4, s10
+; GFX6-NEXT: v_mov_b32_e32 v5, s11
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: v_readfirstlane_b32 s2, v2
+; GFX6-NEXT: v_readfirstlane_b32 s3, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s8, s0, s4
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: s_and_b32 s9, s9, 1
+; GFX8-NEXT: s_cmp_lg_u32 s9, 0
+; GFX8-NEXT: s_addc_u32 s9, s1, s5
+; GFX8-NEXT: s_cselect_b32 s10, 1, 0
+; GFX8-NEXT: s_and_b32 s10, s10, 1
+; GFX8-NEXT: s_cmp_lg_u32 s10, 0
+; GFX8-NEXT: s_addc_u32 s10, s2, s6
+; GFX8-NEXT: s_cselect_b32 s11, 1, 0
+; GFX8-NEXT: s_and_b32 s11, s11, 1
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_cmp_lg_u32 s11, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_addc_u32 s11, s3, s7
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: s_cmp_eq_u64 s[10:11], s[2:3]
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[10:11], v[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[4:5], 0
+; GFX8-NEXT: s_cmp_eq_u64 s[6:7], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[6:7], 0
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: s_movk_i32 s2, 0x7f
+; GFX8-NEXT: s_sub_i32 s6, s2, 64
+; GFX8-NEXT: s_sub_i32 s4, 64, s2
+; GFX8-NEXT: s_cmp_lt_u32 s2, 64
+; GFX8-NEXT: s_cselect_b32 s12, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s13, 1, 0
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_ashr_i64 s[0:1], s[10:11], s2
+; GFX8-NEXT: s_lshr_b64 s[2:3], s[8:9], s2
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[10:11], s4
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX8-NEXT: s_ashr_i32 s4, s11, 31
+; GFX8-NEXT: s_ashr_i64 s[6:7], s[10:11], s6
+; GFX8-NEXT: s_and_b32 s12, s12, 1
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[6:7]
+; GFX8-NEXT: s_and_b32 s6, s13, 1
+; GFX8-NEXT: s_cmp_lg_u32 s6, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX8-NEXT: s_mov_b32 s5, s4
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], s[4:5]
+; GFX8-NEXT: s_add_u32 s2, s2, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: s_and_b32 s4, s4, 1
+; GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GFX8-NEXT: s_addc_u32 s3, s3, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: s_and_b32 s4, s4, 1
+; GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GFX8-NEXT: s_addc_u32 s0, s0, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_and_b32 s4, s4, 1
+; GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: s_addc_u32 s1, s1, 0x80000000
+; GFX8-NEXT: v_mov_b32_e32 v1, s2
+; GFX8-NEXT: v_mov_b32_e32 v2, s3
+; GFX8-NEXT: v_mov_b32_e32 v3, s8
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v4, s9
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s10
+; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: v_readfirstlane_b32 s2, v2
+; GFX8-NEXT: v_readfirstlane_b32 s3, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s8, s0, s4
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: s_and_b32 s9, s9, 1
+; GFX9-NEXT: s_cmp_lg_u32 s9, 0
+; GFX9-NEXT: s_addc_u32 s9, s1, s5
+; GFX9-NEXT: s_cselect_b32 s10, 1, 0
+; GFX9-NEXT: s_and_b32 s10, s10, 1
+; GFX9-NEXT: s_cmp_lg_u32 s10, 0
+; GFX9-NEXT: s_addc_u32 s10, s2, s6
+; GFX9-NEXT: s_cselect_b32 s11, 1, 0
+; GFX9-NEXT: s_and_b32 s11, s11, 1
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: s_cmp_lg_u32 s11, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: s_addc_u32 s11, s3, s7
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: s_cmp_eq_u64 s[10:11], s[2:3]
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[10:11], v[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[0:1], s[4:5], 0
+; GFX9-NEXT: s_cmp_eq_u64 s[6:7], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[6:7], 0
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: s_movk_i32 s2, 0x7f
+; GFX9-NEXT: s_sub_i32 s6, s2, 64
+; GFX9-NEXT: s_sub_i32 s4, 64, s2
+; GFX9-NEXT: s_cmp_lt_u32 s2, 64
+; GFX9-NEXT: s_cselect_b32 s12, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s13, 1, 0
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_ashr_i64 s[0:1], s[10:11], s2
+; GFX9-NEXT: s_lshr_b64 s[2:3], s[8:9], s2
+; GFX9-NEXT: s_lshl_b64 s[4:5], s[10:11], s4
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT: s_ashr_i32 s4, s11, 31
+; GFX9-NEXT: s_ashr_i64 s[6:7], s[10:11], s6
+; GFX9-NEXT: s_and_b32 s12, s12, 1
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[6:7]
+; GFX9-NEXT: s_and_b32 s6, s13, 1
+; GFX9-NEXT: s_cmp_lg_u32 s6, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX9-NEXT: s_mov_b32 s5, s4
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], s[4:5]
+; GFX9-NEXT: s_add_u32 s2, s2, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: s_and_b32 s4, s4, 1
+; GFX9-NEXT: s_cmp_lg_u32 s4, 0
+; GFX9-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: s_and_b32 s4, s4, 1
+; GFX9-NEXT: s_cmp_lg_u32 s4, 0
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX9-NEXT: s_and_b32 s4, s4, 1
+; GFX9-NEXT: s_cmp_lg_u32 s4, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: s_addc_u32 s1, s1, 0x80000000
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_mov_b32_e32 v2, s3
+; GFX9-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_mov_b32_e32 v4, s10
+; GFX9-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: v_readfirstlane_b32 s2, v2
+; GFX9-NEXT: v_readfirstlane_b32 s3, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s8, s0, s4
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: s_movk_i32 s12, 0x7f
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_addc_u32 s9, s1, s5
+; GFX10-NEXT: s_cselect_b32 s10, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[8:9], s[0:1]
+; GFX10-NEXT: s_and_b32 s10, s10, 1
+; GFX10-NEXT: s_cmp_lg_u32 s10, 0
+; GFX10-NEXT: s_addc_u32 s10, s2, s6
+; GFX10-NEXT: s_cselect_b32 s11, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s11, s11, 1
+; GFX10-NEXT: v_mov_b32_e32 v3, s10
+; GFX10-NEXT: s_cmp_lg_u32 s11, 0
+; GFX10-NEXT: s_addc_u32 s11, s3, s7
+; GFX10-NEXT: s_cmp_eq_u64 s[10:11], s[2:3]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[10:11], s[2:3]
+; GFX10-NEXT: s_cselect_b32 s0, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v4, s11
+; GFX10-NEXT: s_and_b32 s0, 1, s0
+; GFX10-NEXT: s_cmp_eq_u64 s[6:7], 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[4:5], 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: s_sub_i32 s13, s12, 64
+; GFX10-NEXT: s_and_b32 s14, 1, s1
+; GFX10-NEXT: s_sub_i32 s2, 64, s12
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[6:7], 0
+; GFX10-NEXT: s_cmp_lt_u32 s12, 64
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s14
+; GFX10-NEXT: s_cselect_b32 s15, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s12, 0
+; GFX10-NEXT: s_cselect_b32 s16, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; GFX10-NEXT: s_lshr_b64 s[0:1], s[8:9], s12
+; GFX10-NEXT: s_lshl_b64 s[2:3], s[10:11], s2
+; GFX10-NEXT: s_ashr_i64 s[4:5], s[10:11], s12
+; GFX10-NEXT: s_and_b32 s12, s15, 1
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_ashr_i32 s2, s11, 31
+; GFX10-NEXT: s_ashr_i64 s[6:7], s[10:11], s13
+; GFX10-NEXT: s_cmp_lg_u32 s12, 0
+; GFX10-NEXT: s_mov_b32 s3, s2
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[6:7]
+; GFX10-NEXT: s_and_b32 s6, s16, 1
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, s9
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
+; GFX10-NEXT: s_cmp_lg_u32 s12, 0
+; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[4:5], s[2:3]
+; GFX10-NEXT: s_add_u32 s0, s0, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v1, s8
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s2, s2, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s0, vcc_lo
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s1, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v3, s2, vcc_lo
+; GFX10-NEXT: s_addc_u32 s3, s3, 0x80000000
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v4, s3, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.sadd.sat.i128(i128 %lhs, i128 %rhs)
+ ret i128 %result
+}
+
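A reading aid, not part of the patch: every s_saddsat_i128 variant above encodes the same generic expansion -- a 128-bit add built from an add/carry chain, signed overflow detected as (sum <s lhs) xor (rhs <s 0), and a saturation value formed as (sum >>s 127) plus 1 << 127, so it lands on INT128_MAX after positive overflow and INT128_MIN after negative overflow. A rough LLVM IR equivalent, with an invented function name, is:

define i128 @sadd_sat_i128_sketch(i128 %lhs, i128 %rhs) {
  %sum = add i128 %lhs, %rhs
  ; signed overflow occurred iff (sum <s lhs) differs from (rhs <s 0)
  %sum.lt = icmp slt i128 %sum, %lhs
  %rhs.neg = icmp slt i128 %rhs, 0
  %ovf = xor i1 %sum.lt, %rhs.neg
  ; saturation value: (sum >>s 127) + (1 << 127); the shift yields all-zeros
  ; or all-ones, and adding the sign bit gives INT128_MIN or INT128_MAX
  %sign = ashr i128 %sum, 127
  %sat = add i128 %sign, 170141183460469231731687303715884105728
  %res = select i1 %ovf, i128 %sat, i128 %sum
  ret i128 %res
}

The s_addc_u32 s1, s1, 0x80000000 lines in the checks are the final step of that 1 << 127 addition, applied to the top word of the shifted sum.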
+define amdgpu_ps <4 x float> @saddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
+; GFX6-LABEL: saddsat_i128_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v6, s2
+; GFX6-NEXT: v_mov_b32_e32 v7, s3
+; GFX6-NEXT: v_addc_u32_e32 v6, vcc, v6, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
+; GFX6-NEXT: s_movk_i32 s0, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX6-NEXT: v_cmp_gt_i64_e32 vcc, s[2:3], v[6:7]
+; GFX6-NEXT: s_sub_i32 s1, s0, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[6:7]
+; GFX6-NEXT: s_sub_i32 s2, 64, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[0:1]
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT: v_lshl_b64 v[2:3], v[6:7], s2
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_xor_b32_e32 v10, v0, v8
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[4:5], s0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_ashr_i64 v[8:9], v[6:7], s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[6:7], s1
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s4
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, 0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i128_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v6, s2
+; GFX8-NEXT: v_mov_b32_e32 v7, s3
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v6, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
+; GFX8-NEXT: s_movk_i32 s0, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, s[2:3], v[6:7]
+; GFX8-NEXT: s_sub_i32 s1, s0, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[6:7]
+; GFX8-NEXT: s_sub_i32 s2, 64, s0
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[0:1]
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_xor_b32_e32 v10, v0, v8
+; GFX8-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i128_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, s2
+; GFX9-NEXT: v_mov_b32_e32 v7, s3
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v6, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
+; GFX9-NEXT: s_movk_i32 s0, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[2:3], v[6:7]
+; GFX9-NEXT: s_sub_i32 s1, s0, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[6:7]
+; GFX9-NEXT: s_sub_i32 s2, 64, s0
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[0:1]
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[2:3]
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX9-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_xor_b32_e32 v10, v0, v8
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i128_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_add_co_u32_e64 v4, vcc_lo, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, s2, v2, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, s3, v3, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[0:1], v[4:5]
+; GFX10-NEXT: s_movk_i32 s0, 0x7f
+; GFX10-NEXT: s_sub_i32 s1, 64, s0
+; GFX10-NEXT: v_lshrrev_b64 v[15:16], s0, v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v9, v8, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, 0, v[0:1]
+; GFX10-NEXT: v_lshlrev_b64 v[8:9], s1, v[6:7]
+; GFX10-NEXT: s_sub_i32 s1, s0, 64
+; GFX10-NEXT: s_cmp_lt_u32 s0, 64
+; GFX10-NEXT: v_ashrrev_i64 v[0:1], s0, v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[2:3]
+; GFX10-NEXT: v_or_b32_e32 v8, v15, v8
+; GFX10-NEXT: v_or_b32_e32 v9, v16, v9
+; GFX10-NEXT: v_ashrrev_i32_e32 v15, 31, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
+; GFX10-NEXT: v_ashrrev_i64 v[2:3], s1, v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v12, v11, vcc_lo
+; GFX10-NEXT: s_cselect_b32 vcc_lo, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s0, 0
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX10-NEXT: s_and_b32 s0, 1, s1
+; GFX10-NEXT: s_and_b32 s1, 1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
+; GFX10-NEXT: v_xor_b32_e32 v9, v11, v10
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v15, v0, s0
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v9
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v15, v1, s0
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v2, 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v0, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0x80000000, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, v2, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, v3, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.sadd.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x float> @saddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: saddsat_i128_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v1, v5, vcc
+; GFX6-NEXT: v_mov_b32_e32 v6, s2
+; GFX6-NEXT: v_mov_b32_e32 v7, s3
+; GFX6-NEXT: v_addc_u32_e32 v6, vcc, v2, v6, vcc
+; GFX6-NEXT: v_addc_u32_e32 v7, vcc, v3, v7, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX6-NEXT: v_cmp_lt_u64_e64 s[0:1], s[0:1], 0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX6-NEXT: s_movk_i32 s0, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_sub_i32 s2, 64, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, 64
+; GFX6-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[4:5], s0
+; GFX6-NEXT: v_lshl_b64 v[2:3], v[6:7], s2
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_ashr_i64 v[8:9], v[6:7], s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[6:7], s1
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s4
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, 0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: saddsat_i128_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v1, v5, vcc
+; GFX8-NEXT: v_mov_b32_e32 v6, s2
+; GFX8-NEXT: v_mov_b32_e32 v7, s3
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v2, v6, vcc
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v3, v7, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[0:1], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s4
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_movk_i32 s0, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_sub_i32 s2, 64, s0
+; GFX8-NEXT: s_sub_i32 s1, s0, 64
+; GFX8-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
+; GFX8-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: saddsat_i128_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v1, v5, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, s2
+; GFX9-NEXT: v_mov_b32_e32 v7, s3
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v2, v6, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v3, v7, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[0:1], s[0:1], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[2:3], 0
+; GFX9-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s4
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_movk_i32 s0, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_sub_i32 s2, 64, s0
+; GFX9-NEXT: s_sub_i32 s1, s0, 64
+; GFX9-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX9-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: saddsat_i128_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_mov_b32_e32 v6, v1
+; GFX10-NEXT: v_mov_b32_e32 v9, v2
+; GFX10-NEXT: v_mov_b32_e32 v10, v3
+; GFX10-NEXT: s_cmp_eq_u64 s[2:3], 0
+; GFX10-NEXT: v_add_co_u32_e64 v15, vcc_lo, v5, s0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v16, vcc_lo, s1, v6, vcc_lo
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, s2, v9, vcc_lo
+; GFX10-NEXT: s_and_b32 s1, 1, s4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v20, vcc_lo, s3, v10, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[15:16], v[5:6]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, s0
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[2:3], 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_ashrrev_i32_e32 v7, 31, v20
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[19:20], v[9:10]
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[19:20], v[9:10]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, s0
+; GFX10-NEXT: s_movk_i32 s0, 0x7f
+; GFX10-NEXT: s_sub_i32 s2, 64, s0
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v1, v0, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
+; GFX10-NEXT: v_lshrrev_b64 v[0:1], s0, v[15:16]
+; GFX10-NEXT: v_lshlrev_b64 v[2:3], s2, v[19:20]
+; GFX10-NEXT: s_sub_i32 s1, s0, 64
+; GFX10-NEXT: s_cmp_lt_u32 s0, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v9, v8, vcc_lo
+; GFX10-NEXT: s_cselect_b32 vcc_lo, 1, 0
+; GFX10-NEXT: v_ashrrev_i64 v[8:9], s1, v[19:20]
+; GFX10-NEXT: s_cmp_eq_u32 s0, 0
+; GFX10-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX10-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_ashrrev_i64 v[0:1], s0, v[19:20]
+; GFX10-NEXT: s_and_b32 s0, 1, s1
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc_lo
+; GFX10-NEXT: s_and_b32 s1, 1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
+; GFX10-NEXT: v_xor_b32_e32 v9, v11, v10
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v15, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v16, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v7, v0, s0
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v9
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v7, v1, s0
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v2, 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v0, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0x80000000, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v15, v2, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v16, v3, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v19, v8, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v20, v9, s0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.sadd.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define <2 x i128> @v_saddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
+; GFX6-LABEL: v_saddsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_add_i32_e32 v16, vcc, v0, v8
+; GFX6-NEXT: v_addc_u32_e32 v17, vcc, v1, v9, vcc
+; GFX6-NEXT: v_addc_u32_e32 v18, vcc, v2, v10, vcc
+; GFX6-NEXT: v_addc_u32_e32 v19, vcc, v3, v11, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[16:17], v[0:1]
+; GFX6-NEXT: s_movk_i32 s6, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[18:19], v[2:3]
+; GFX6-NEXT: s_sub_i32 s7, s6, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[18:19], v[2:3]
+; GFX6-NEXT: s_sub_i32 s8, 64, s6
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[8:9]
+; GFX6-NEXT: s_cmp_lt_u32 s6, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[10:11]
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX6-NEXT: s_cmp_eq_u32 s6, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[16:17], s6
+; GFX6-NEXT: v_lshl_b64 v[2:3], v[18:19], s8
+; GFX6-NEXT: s_cselect_b32 s5, 1, 0
+; GFX6-NEXT: s_and_b32 s4, 1, s4
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[18:19], s7
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX6-NEXT: s_and_b32 s4, 1, s5
+; GFX6-NEXT: v_ashr_i64 v[8:9], v[18:19], s6
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, s4
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, v16, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, v17, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, 0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: s_brev_b32 s4, 1
+; GFX6-NEXT: v_mov_b32_e32 v8, s4
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX6-NEXT: v_add_i32_e32 v8, vcc, v4, v12
+; GFX6-NEXT: v_addc_u32_e32 v9, vcc, v5, v13, vcc
+; GFX6-NEXT: v_addc_u32_e32 v10, vcc, v6, v14, vcc
+; GFX6-NEXT: v_addc_u32_e32 v11, vcc, v7, v15, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
+; GFX6-NEXT: s_cmp_lt_u32 s6, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[10:11], v[6:7]
+; GFX6-NEXT: s_cselect_b32 s5, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[10:11], v[6:7]
+; GFX6-NEXT: s_cmp_eq_u32 s6, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[12:13]
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[14:15]
+; GFX6-NEXT: v_ashr_i64 v[12:13], v[10:11], s6
+; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX6-NEXT: v_ashrrev_i32_e32 v15, 31, v11
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX6-NEXT: v_xor_b32_e32 v14, v5, v4
+; GFX6-NEXT: v_lshr_b64 v[4:5], v[8:9], s6
+; GFX6-NEXT: v_lshl_b64 v[6:7], v[10:11], s8
+; GFX6-NEXT: s_and_b32 s6, 1, s5
+; GFX6-NEXT: v_or_b32_e32 v6, v4, v6
+; GFX6-NEXT: v_or_b32_e32 v7, v5, v7
+; GFX6-NEXT: v_ashr_i64 v[4:5], v[10:11], s7
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX6-NEXT: s_and_b32 s6, 1, s9
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX6-NEXT: s_and_b32 s5, 1, s5
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
+; GFX6-NEXT: v_cndmask_b32_e32 v6, v15, v12, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v7, v15, v13, vcc
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, 0, v4
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GFX6-NEXT: v_mov_b32_e32 v12, s4
+; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; GFX6-NEXT: v_addc_u32_e32 v7, vcc, v7, v12, vcc
+; GFX6-NEXT: v_and_b32_e32 v12, 1, v14
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v6, v10, v6, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v7, v11, v7, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_saddsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v16, vcc, v0, v8
+; GFX8-NEXT: v_addc_u32_e32 v17, vcc, v1, v9, vcc
+; GFX8-NEXT: v_addc_u32_e32 v18, vcc, v2, v10, vcc
+; GFX8-NEXT: v_addc_u32_e32 v19, vcc, v3, v11, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[16:17], v[0:1]
+; GFX8-NEXT: s_movk_i32 s6, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[18:19], v[2:3]
+; GFX8-NEXT: s_sub_i32 s7, s6, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[18:19], v[2:3]
+; GFX8-NEXT: s_sub_i32 s8, 64, s6
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[8:9]
+; GFX8-NEXT: s_cmp_lt_u32 s6, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[10:11]
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX8-NEXT: s_cmp_eq_u32 s6, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX8-NEXT: v_lshrrev_b64 v[0:1], s6, v[16:17]
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], s8, v[18:19]
+; GFX8-NEXT: s_cselect_b32 s5, 1, 0
+; GFX8-NEXT: s_and_b32 s4, 1, s4
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_ashrrev_i64 v[0:1], s7, v[18:19]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: s_and_b32 s4, 1, s5
+; GFX8-NEXT: v_ashrrev_i64 v[8:9], s6, v[18:19]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v16, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v17, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: s_brev_b32 s4, 1
+; GFX8-NEXT: v_mov_b32_e32 v8, s4
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v4, v12
+; GFX8-NEXT: v_addc_u32_e32 v9, vcc, v5, v13, vcc
+; GFX8-NEXT: v_addc_u32_e32 v10, vcc, v6, v14, vcc
+; GFX8-NEXT: v_addc_u32_e32 v11, vcc, v7, v15, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
+; GFX8-NEXT: s_cmp_lt_u32 s6, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[10:11], v[6:7]
+; GFX8-NEXT: s_cselect_b32 s5, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[10:11], v[6:7]
+; GFX8-NEXT: s_cmp_eq_u32 s6, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[12:13]
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[14:15]
+; GFX8-NEXT: v_ashrrev_i64 v[12:13], s6, v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX8-NEXT: v_ashrrev_i32_e32 v15, 31, v11
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX8-NEXT: v_xor_b32_e32 v14, v5, v4
+; GFX8-NEXT: v_lshrrev_b64 v[4:5], s6, v[8:9]
+; GFX8-NEXT: v_lshlrev_b64 v[6:7], s8, v[10:11]
+; GFX8-NEXT: s_and_b32 s6, 1, s5
+; GFX8-NEXT: v_or_b32_e32 v6, v4, v6
+; GFX8-NEXT: v_or_b32_e32 v7, v5, v7
+; GFX8-NEXT: v_ashrrev_i64 v[4:5], s7, v[10:11]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX8-NEXT: s_and_b32 s6, 1, s9
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX8-NEXT: s_and_b32 s5, 1, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v15, v12, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v13, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0, v4
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GFX8-NEXT: v_mov_b32_e32 v12, s4
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v12, vcc
+; GFX8-NEXT: v_and_b32_e32 v12, 1, v14
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v11, v7, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_saddsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, v0, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, v1, v9, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v18, vcc, v2, v10, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, v3, v11, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[16:17], v[0:1]
+; GFX9-NEXT: s_movk_i32 s6, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[18:19], v[2:3]
+; GFX9-NEXT: s_sub_i32 s7, s6, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[18:19], v[2:3]
+; GFX9-NEXT: s_sub_i32 s8, 64, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: s_cmp_lt_u32 s6, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: s_cmp_eq_u32 s6, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], s6, v[16:17]
+; GFX9-NEXT: v_lshlrev_b64 v[2:3], s8, v[18:19]
+; GFX9-NEXT: s_cselect_b32 s5, 1, 0
+; GFX9-NEXT: s_and_b32 s4, 1, s4
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], s7, v[18:19]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: s_and_b32 s4, 1, s5
+; GFX9-NEXT: v_ashrrev_i64 v[8:9], s6, v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v16, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v17, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_brev_b32 s4, 1
+; GFX9-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v8, vcc, v4, v12
+; GFX9-NEXT: v_addc_co_u32_e32 v9, vcc, v5, v13, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v10, vcc, v6, v14, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v11, vcc, v7, v15, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
+; GFX9-NEXT: s_cmp_lt_u32 s6, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[10:11], v[6:7]
+; GFX9-NEXT: s_cselect_b32 s5, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[10:11], v[6:7]
+; GFX9-NEXT: s_cmp_eq_u32 s6, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, 0, v[12:13]
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, 0, v[14:15]
+; GFX9-NEXT: v_ashrrev_i64 v[12:13], s6, v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX9-NEXT: v_ashrrev_i32_e32 v15, 31, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX9-NEXT: v_xor_b32_e32 v14, v5, v4
+; GFX9-NEXT: v_lshrrev_b64 v[4:5], s6, v[8:9]
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], s8, v[10:11]
+; GFX9-NEXT: s_and_b32 s6, 1, s5
+; GFX9-NEXT: v_or_b32_e32 v6, v4, v6
+; GFX9-NEXT: v_or_b32_e32 v7, v5, v7
+; GFX9-NEXT: v_ashrrev_i64 v[4:5], s7, v[10:11]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX9-NEXT: s_and_b32 s6, 1, s9
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX9-NEXT: s_and_b32 s5, 1, s5
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v15, v12, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v13, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
+; GFX9-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v12, vcc
+; GFX9-NEXT: v_and_b32_e32 v12, 1, v14
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v10, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v11, v7, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_saddsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v22, v0
+; GFX10-NEXT: v_mov_b32_e32 v23, v1
+; GFX10-NEXT: v_mov_b32_e32 v20, v2
+; GFX10-NEXT: v_mov_b32_e32 v21, v3
+; GFX10-NEXT: s_movk_i32 s5, 0x7f
+; GFX10-NEXT: v_add_co_u32_e64 v16, vcc_lo, v22, v8
+; GFX10-NEXT: s_sub_i32 s6, 64, s5
+; GFX10-NEXT: v_add_co_ci_u32_e32 v17, vcc_lo, v23, v9, vcc_lo
+; GFX10-NEXT: s_sub_i32 s7, s5, 64
+; GFX10-NEXT: v_add_co_ci_u32_e32 v18, vcc_lo, v20, v10, vcc_lo
+; GFX10-NEXT: s_cmp_lt_u32 s5, 64
+; GFX10-NEXT: v_add_co_ci_u32_e32 v19, vcc_lo, v21, v11, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[16:17], v[22:23]
+; GFX10-NEXT: v_mov_b32_e32 v26, v4
+; GFX10-NEXT: v_mov_b32_e32 v27, v5
+; GFX10-NEXT: v_mov_b32_e32 v24, v6
+; GFX10-NEXT: v_lshlrev_b64 v[2:3], s6, v[18:19]
+; GFX10-NEXT: v_mov_b32_e32 v25, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[18:19], v[20:21]
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[18:19], v[20:21]
+; GFX10-NEXT: v_cndmask_b32_e32 v20, v1, v0, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, 0, v[8:9]
+; GFX10-NEXT: v_lshrrev_b64 v[0:1], s5, v[16:17]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, 0, v[10:11]
+; GFX10-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX10-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX10-NEXT: v_ashrrev_i64 v[0:1], s5, v[18:19]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[10:11]
+; GFX10-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v9, v8, vcc_lo
+; GFX10-NEXT: s_cselect_b32 vcc_lo, 1, 0
+; GFX10-NEXT: v_ashrrev_i64 v[8:9], s7, v[18:19]
+; GFX10-NEXT: s_cmp_eq_u32 s5, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: s_and_b32 s8, 1, vcc_lo
+; GFX10-NEXT: s_and_b32 s4, 1, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, s8
+; GFX10-NEXT: v_xor_b32_e32 v9, v10, v20
+; GFX10-NEXT: s_brev_b32 s8, 1
+; GFX10-NEXT: s_cmp_lt_u32 s5, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v11, v0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v11, v1, s4
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v9
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v2, 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v20, vcc_lo, 0, v0, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v21, vcc_lo, s8, v1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX10-NEXT: v_add_co_u32_e64 v8, s4, v26, v12
+; GFX10-NEXT: v_add_co_ci_u32_e64 v9, s4, v27, v13, s4
+; GFX10-NEXT: v_add_co_ci_u32_e64 v10, s4, v24, v14, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v2, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e64 v11, s4, v25, v15, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[8:9], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v18, v20, vcc_lo
+; GFX10-NEXT: v_lshrrev_b64 v[3:4], s5, v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[10:11], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v16, 0, 1, s4
+; GFX10-NEXT: v_cmp_gt_u64_e64 s4, 0, v[12:13]
+; GFX10-NEXT: v_lshlrev_b64 v[12:13], s6, v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e64 v17, 0, 1, s4
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, 0, v[14:15]
+; GFX10-NEXT: v_or_b32_e32 v12, v3, v12
+; GFX10-NEXT: v_or_b32_e32 v13, v4, v13
+; GFX10-NEXT: v_ashrrev_i64 v[3:4], s5, v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e64 v18, 0, 1, s4
+; GFX10-NEXT: v_cmp_eq_u64_e64 s4, v[10:11], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v16, v5, s4
+; GFX10-NEXT: v_cmp_eq_u64_e64 s4, 0, v[14:15]
+; GFX10-NEXT: v_ashrrev_i64 v[5:6], s7, v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v18, v17, s4
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s5, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v12, s4
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v13, s4
+; GFX10-NEXT: s_and_b32 s5, 1, s6
+; GFX10-NEXT: s_and_b32 s6, 1, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, s5
+; GFX10-NEXT: v_xor_b32_e32 v7, v14, v7
+; GFX10-NEXT: v_ashrrev_i32_e32 v18, 31, v11
+; GFX10-NEXT: v_cmp_ne_u32_e64 s5, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v8, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v9, s4
+; GFX10-NEXT: v_and_b32_e32 v7, 1, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v18, v3, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v18, v4, s5
+; GFX10-NEXT: v_add_co_u32_e64 v5, s4, v5, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s4, 0, v6, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 s5, 0, v7
+; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, 0, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v19, v21, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e64 v12, s4, s8, v4, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v8, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v9, v6, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v10, v7, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v11, v12, s5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
+
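A similarly hedged note on the vector cases: <2 x i128> gets no dedicated wide-vector path here; the checks above are simply the i128 sequence emitted twice, once per element, after the vector is split. In per-element IR form (names invented for the sketch):

  %lhs0 = extractelement <2 x i128> %lhs, i32 0
  %rhs0 = extractelement <2 x i128> %rhs, i32 0
  %res0 = call i128 @llvm.sadd.sat.i128(i128 %lhs0, i128 %rhs0)
  %lhs1 = extractelement <2 x i128> %lhs, i32 1
  %rhs1 = extractelement <2 x i128> %rhs, i32 1
  %res1 = call i128 @llvm.sadd.sat.i128(i128 %lhs1, i128 %rhs1)
  %tmp = insertelement <2 x i128> undef, i128 %res0, i32 0
  %result = insertelement <2 x i128> %tmp, i128 %res1, i32 1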
+define amdgpu_ps <2 x i128> @s_saddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128> inreg %rhs) {
+; GFX6-LABEL: s_saddsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s16, s0, s8
+; GFX6-NEXT: s_cselect_b32 s17, 1, 0
+; GFX6-NEXT: s_and_b32 s17, s17, 1
+; GFX6-NEXT: s_cmp_lg_u32 s17, 0
+; GFX6-NEXT: s_addc_u32 s17, s1, s9
+; GFX6-NEXT: s_cselect_b32 s18, 1, 0
+; GFX6-NEXT: s_and_b32 s18, s18, 1
+; GFX6-NEXT: s_cmp_lg_u32 s18, 0
+; GFX6-NEXT: s_addc_u32 s18, s2, s10
+; GFX6-NEXT: s_cselect_b32 s19, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: s_and_b32 s19, s19, 1
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: s_cmp_lg_u32 s19, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_addc_u32 s19, s3, s11
+; GFX6-NEXT: s_movk_i32 s20, 0x7f
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[18:19], v[0:1]
+; GFX6-NEXT: v_cmp_lt_u64_e64 s[0:1], s[8:9], 0
+; GFX6-NEXT: s_sub_i32 s21, s20, 64
+; GFX6-NEXT: s_sub_i32 s22, 64, s20
+; GFX6-NEXT: s_cmp_lt_u32 s20, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[18:19], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX6-NEXT: s_cselect_b32 s23, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s20, 0
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], s[10:11], 0
+; GFX6-NEXT: s_cselect_b32 s24, 1, 0
+; GFX6-NEXT: s_lshr_b64 s[2:3], s[16:17], s20
+; GFX6-NEXT: s_lshl_b64 s[8:9], s[18:19], s22
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[10:11], 0
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GFX6-NEXT: s_ashr_i32 s8, s19, 31
+; GFX6-NEXT: s_ashr_i64 s[0:1], s[18:19], s20
+; GFX6-NEXT: s_ashr_i64 s[10:11], s[18:19], s21
+; GFX6-NEXT: s_and_b32 s23, s23, 1
+; GFX6-NEXT: s_cmp_lg_u32 s23, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX6-NEXT: s_and_b32 s10, s24, 1
+; GFX6-NEXT: s_cmp_lg_u32 s10, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[16:17], s[2:3]
+; GFX6-NEXT: s_cmp_lg_u32 s23, 0
+; GFX6-NEXT: s_mov_b32 s9, s8
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], s[8:9]
+; GFX6-NEXT: s_add_u32 s2, s2, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s3, s3, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s0, s0, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX6-NEXT: s_brev_b32 s23, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_addc_u32 s1, s1, s23
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: v_mov_b32_e32 v1, s2
+; GFX6-NEXT: v_mov_b32_e32 v3, s16
+; GFX6-NEXT: s_add_u32 s0, s4, s12
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v3, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: s_addc_u32 s1, s5, s13
+; GFX6-NEXT: s_cselect_b32 s2, 1, 0
+; GFX6-NEXT: s_and_b32 s2, s2, 1
+; GFX6-NEXT: s_cmp_lg_u32 s2, 0
+; GFX6-NEXT: v_mov_b32_e32 v2, s3
+; GFX6-NEXT: v_mov_b32_e32 v4, s17
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s18
+; GFX6-NEXT: v_mov_b32_e32 v3, s19
+; GFX6-NEXT: s_addc_u32 s2, s6, s14
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v6, v2, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v7, v3, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s4
+; GFX6-NEXT: s_and_b32 s3, s3, 1
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: s_cmp_lg_u32 s3, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s6
+; GFX6-NEXT: s_addc_u32 s3, s7, s15
+; GFX6-NEXT: v_mov_b32_e32 v1, s7
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_cmp_lt_u64_e64 s[4:5], s[12:13], 0
+; GFX6-NEXT: s_cmp_lt_u32 s20, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX6-NEXT: s_cselect_b32 s12, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s20, 0
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[4:5], s[14:15], 0
+; GFX6-NEXT: s_cselect_b32 s13, 1, 0
+; GFX6-NEXT: s_lshr_b64 s[6:7], s[0:1], s20
+; GFX6-NEXT: s_lshl_b64 s[8:9], s[2:3], s22
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; GFX6-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX6-NEXT: s_ashr_i32 s8, s3, 31
+; GFX6-NEXT: s_ashr_i64 s[4:5], s[2:3], s20
+; GFX6-NEXT: s_ashr_i64 s[10:11], s[2:3], s21
+; GFX6-NEXT: s_and_b32 s12, s12, 1
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[6:7], s[10:11]
+; GFX6-NEXT: s_and_b32 s10, s13, 1
+; GFX6-NEXT: s_cmp_lg_u32 s10, 0
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX6-NEXT: s_mov_b32 s9, s8
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
+; GFX6-NEXT: s_add_u32 s6, s6, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s7, s7, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[14:15], 0
+; GFX6-NEXT: s_addc_u32 s4, s4, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_mov_b32_e32 v3, s0
+; GFX6-NEXT: v_mov_b32_e32 v8, s1
+; GFX6-NEXT: s_addc_u32 s5, s5, s23
+; GFX6-NEXT: v_mov_b32_e32 v1, s6
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v2, s7
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v8, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s4
+; GFX6-NEXT: v_mov_b32_e32 v8, s2
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: v_mov_b32_e32 v9, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v5
+; GFX6-NEXT: v_readfirstlane_b32 s1, v4
+; GFX6-NEXT: v_readfirstlane_b32 s2, v6
+; GFX6-NEXT: v_readfirstlane_b32 s3, v7
+; GFX6-NEXT: v_readfirstlane_b32 s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s5, v1
+; GFX6-NEXT: v_readfirstlane_b32 s6, v2
+; GFX6-NEXT: v_readfirstlane_b32 s7, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_saddsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s16, s0, s8
+; GFX8-NEXT: s_cselect_b32 s17, 1, 0
+; GFX8-NEXT: s_and_b32 s17, s17, 1
+; GFX8-NEXT: s_cmp_lg_u32 s17, 0
+; GFX8-NEXT: s_addc_u32 s17, s1, s9
+; GFX8-NEXT: s_cselect_b32 s18, 1, 0
+; GFX8-NEXT: s_and_b32 s18, s18, 1
+; GFX8-NEXT: s_cmp_lg_u32 s18, 0
+; GFX8-NEXT: s_addc_u32 s18, s2, s10
+; GFX8-NEXT: s_cselect_b32 s19, 1, 0
+; GFX8-NEXT: s_and_b32 s19, s19, 1
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_cmp_lg_u32 s19, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_addc_u32 s19, s3, s11
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: s_cmp_eq_u64 s[18:19], s[2:3]
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[18:19], v[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[0:1], s[8:9], 0
+; GFX8-NEXT: s_cmp_eq_u64 s[10:11], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], s[10:11], 0
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_movk_i32 s20, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: s_sub_i32 s21, s20, 64
+; GFX8-NEXT: s_sub_i32 s22, 64, s20
+; GFX8-NEXT: s_cmp_lt_u32 s20, 64
+; GFX8-NEXT: s_cselect_b32 s23, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s20, 0
+; GFX8-NEXT: s_cselect_b32 s24, 1, 0
+; GFX8-NEXT: s_lshr_b64 s[2:3], s[16:17], s20
+; GFX8-NEXT: s_lshl_b64 s[8:9], s[18:19], s22
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GFX8-NEXT: s_ashr_i32 s8, s19, 31
+; GFX8-NEXT: s_ashr_i64 s[0:1], s[18:19], s20
+; GFX8-NEXT: s_ashr_i64 s[10:11], s[18:19], s21
+; GFX8-NEXT: s_and_b32 s23, s23, 1
+; GFX8-NEXT: s_cmp_lg_u32 s23, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX8-NEXT: s_and_b32 s10, s24, 1
+; GFX8-NEXT: s_cmp_lg_u32 s10, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[16:17], s[2:3]
+; GFX8-NEXT: s_cmp_lg_u32 s23, 0
+; GFX8-NEXT: s_mov_b32 s9, s8
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], s[8:9]
+; GFX8-NEXT: s_add_u32 s2, s2, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s3, s3, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s0, s0, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_brev_b32 s23, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: s_addc_u32 s1, s1, s23
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s2
+; GFX8-NEXT: v_mov_b32_e32 v3, s16
+; GFX8-NEXT: s_add_u32 s0, s4, s12
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: s_addc_u32 s1, s5, s13
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_and_b32 s2, s2, 1
+; GFX8-NEXT: s_cmp_lg_u32 s2, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s3
+; GFX8-NEXT: v_mov_b32_e32 v4, s17
+; GFX8-NEXT: s_addc_u32 s2, s6, s14
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s18
+; GFX8-NEXT: v_mov_b32_e32 v3, s19
+; GFX8-NEXT: s_and_b32 s3, s3, 1
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: s_cmp_lg_u32 s3, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: s_addc_u32 s3, s7, s15
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: s_and_b32 s4, 1, s6
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: v_cmp_lt_u64_e64 s[4:5], s[12:13], 0
+; GFX8-NEXT: s_cmp_eq_u64 s[14:15], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[4:5], s[14:15], 0
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; GFX8-NEXT: s_and_b32 s4, 1, s6
+; GFX8-NEXT: s_cmp_lt_u32 s20, 64
+; GFX8-NEXT: s_cselect_b32 s12, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s20, 0
+; GFX8-NEXT: s_cselect_b32 s13, 1, 0
+; GFX8-NEXT: s_lshr_b64 s[6:7], s[0:1], s20
+; GFX8-NEXT: s_lshl_b64 s[8:9], s[2:3], s22
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX8-NEXT: s_ashr_i32 s8, s3, 31
+; GFX8-NEXT: s_ashr_i64 s[4:5], s[2:3], s20
+; GFX8-NEXT: s_ashr_i64 s[10:11], s[2:3], s21
+; GFX8-NEXT: s_and_b32 s12, s12, 1
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[6:7], s[10:11]
+; GFX8-NEXT: s_and_b32 s10, s13, 1
+; GFX8-NEXT: s_cmp_lg_u32 s10, 0
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX8-NEXT: s_mov_b32 s9, s8
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
+; GFX8-NEXT: s_add_u32 s6, s6, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s7, s7, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s4, s4, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_mov_b32_e32 v3, s0
+; GFX8-NEXT: v_mov_b32_e32 v8, s1
+; GFX8-NEXT: s_addc_u32 s5, s5, s23
+; GFX8-NEXT: v_mov_b32_e32 v1, s6
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s7
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v8, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_mov_b32_e32 v8, s2
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: v_mov_b32_e32 v9, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v5
+; GFX8-NEXT: v_readfirstlane_b32 s1, v4
+; GFX8-NEXT: v_readfirstlane_b32 s2, v6
+; GFX8-NEXT: v_readfirstlane_b32 s3, v7
+; GFX8-NEXT: v_readfirstlane_b32 s4, v0
+; GFX8-NEXT: v_readfirstlane_b32 s5, v1
+; GFX8-NEXT: v_readfirstlane_b32 s6, v2
+; GFX8-NEXT: v_readfirstlane_b32 s7, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_saddsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s16, s0, s8
+; GFX9-NEXT: s_cselect_b32 s17, 1, 0
+; GFX9-NEXT: s_and_b32 s17, s17, 1
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_addc_u32 s17, s1, s9
+; GFX9-NEXT: s_cselect_b32 s18, 1, 0
+; GFX9-NEXT: s_and_b32 s18, s18, 1
+; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_addc_u32 s18, s2, s10
+; GFX9-NEXT: s_cselect_b32 s19, 1, 0
+; GFX9-NEXT: s_and_b32 s19, s19, 1
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: s_addc_u32 s19, s3, s11
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: s_cmp_eq_u64 s[18:19], s[2:3]
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[18:19], v[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[0:1], s[8:9], 0
+; GFX9-NEXT: s_cmp_eq_u64 s[10:11], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], s[10:11], 0
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_movk_i32 s20, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: s_sub_i32 s21, s20, 64
+; GFX9-NEXT: s_sub_i32 s22, 64, s20
+; GFX9-NEXT: s_cmp_lt_u32 s20, 64
+; GFX9-NEXT: s_cselect_b32 s23, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s20, 0
+; GFX9-NEXT: s_cselect_b32 s24, 1, 0
+; GFX9-NEXT: s_lshr_b64 s[2:3], s[16:17], s20
+; GFX9-NEXT: s_lshl_b64 s[8:9], s[18:19], s22
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GFX9-NEXT: s_ashr_i32 s8, s19, 31
+; GFX9-NEXT: s_ashr_i64 s[0:1], s[18:19], s20
+; GFX9-NEXT: s_ashr_i64 s[10:11], s[18:19], s21
+; GFX9-NEXT: s_and_b32 s23, s23, 1
+; GFX9-NEXT: s_cmp_lg_u32 s23, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX9-NEXT: s_and_b32 s10, s24, 1
+; GFX9-NEXT: s_cmp_lg_u32 s10, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[16:17], s[2:3]
+; GFX9-NEXT: s_cmp_lg_u32 s23, 0
+; GFX9-NEXT: s_mov_b32 s9, s8
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], s[8:9]
+; GFX9-NEXT: s_add_u32 s2, s2, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX9-NEXT: s_brev_b32 s23, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: s_addc_u32 s1, s1, s23
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_mov_b32_e32 v3, s16
+; GFX9-NEXT: s_add_u32 s0, s4, s12
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_addc_u32 s1, s5, s13
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_and_b32 s2, s2, 1
+; GFX9-NEXT: s_cmp_lg_u32 s2, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s3
+; GFX9-NEXT: v_mov_b32_e32 v4, s17
+; GFX9-NEXT: s_addc_u32 s2, s6, s14
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s18
+; GFX9-NEXT: v_mov_b32_e32 v3, s19
+; GFX9-NEXT: s_and_b32 s3, s3, 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: s_cmp_lg_u32 s3, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: s_addc_u32 s3, s7, s15
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: s_and_b32 s4, 1, s6
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: v_cmp_lt_u64_e64 s[4:5], s[12:13], 0
+; GFX9-NEXT: s_cmp_eq_u64 s[14:15], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], s[14:15], 0
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; GFX9-NEXT: s_and_b32 s4, 1, s6
+; GFX9-NEXT: s_cmp_lt_u32 s20, 64
+; GFX9-NEXT: s_cselect_b32 s12, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s20, 0
+; GFX9-NEXT: s_cselect_b32 s13, 1, 0
+; GFX9-NEXT: s_lshr_b64 s[6:7], s[0:1], s20
+; GFX9-NEXT: s_lshl_b64 s[8:9], s[2:3], s22
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-NEXT: s_ashr_i32 s8, s3, 31
+; GFX9-NEXT: s_ashr_i64 s[4:5], s[2:3], s20
+; GFX9-NEXT: s_ashr_i64 s[10:11], s[2:3], s21
+; GFX9-NEXT: s_and_b32 s12, s12, 1
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[6:7], s[10:11]
+; GFX9-NEXT: s_and_b32 s10, s13, 1
+; GFX9-NEXT: s_cmp_lg_u32 s10, 0
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX9-NEXT: s_mov_b32 s9, s8
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
+; GFX9-NEXT: s_add_u32 s6, s6, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s4, s4, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, s0
+; GFX9-NEXT: v_mov_b32_e32 v8, s1
+; GFX9-NEXT: s_addc_u32 s5, s5, s23
+; GFX9-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, s7
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v8, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v8, s2
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: v_mov_b32_e32 v9, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v5
+; GFX9-NEXT: v_readfirstlane_b32 s1, v4
+; GFX9-NEXT: v_readfirstlane_b32 s2, v6
+; GFX9-NEXT: v_readfirstlane_b32 s3, v7
+; GFX9-NEXT: v_readfirstlane_b32 s4, v0
+; GFX9-NEXT: v_readfirstlane_b32 s5, v1
+; GFX9-NEXT: v_readfirstlane_b32 s6, v2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_saddsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s28, s0, s8
+; GFX10-NEXT: s_cselect_b32 s17, 1, 0
+; GFX10-NEXT: s_mov_b32 s46, s0
+; GFX10-NEXT: s_and_b32 s17, s17, 1
+; GFX10-NEXT: s_mov_b32 s47, s1
+; GFX10-NEXT: s_cmp_lg_u32 s17, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_addc_u32 s29, s1, s9
+; GFX10-NEXT: s_cselect_b32 s18, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[28:29], s[46:47]
+; GFX10-NEXT: s_and_b32 s18, s18, 1
+; GFX10-NEXT: s_cmp_lg_u32 s18, 0
+; GFX10-NEXT: s_addc_u32 s30, s2, s10
+; GFX10-NEXT: s_cselect_b32 s19, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s19, s19, 1
+; GFX10-NEXT: s_cmp_lg_u32 s19, 0
+; GFX10-NEXT: s_addc_u32 s31, s3, s11
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[30:31], s[2:3]
+; GFX10-NEXT: s_cmp_eq_u64 s[30:31], s[2:3]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[8:9], 0
+; GFX10-NEXT: s_cselect_b32 s20, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s0, 1, s20
+; GFX10-NEXT: s_cmp_eq_u64 s[10:11], 0
+; GFX10-NEXT: s_movk_i32 s20, 0x7f
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s2
+; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[10:11], 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: s_and_b32 s1, 1, s1
+; GFX10-NEXT: s_sub_i32 s21, s20, 64
+; GFX10-NEXT: s_sub_i32 s22, 64, s20
+; GFX10-NEXT: s_cmp_lt_u32 s20, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: s_cselect_b32 s10, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s20, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
+; GFX10-NEXT: s_cselect_b32 s23, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
+; GFX10-NEXT: s_lshr_b64 s[0:1], s[28:29], s20
+; GFX10-NEXT: s_lshl_b64 s[2:3], s[30:31], s22
+; GFX10-NEXT: s_and_b32 s24, s10, 1
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_ashr_i32 s2, s31, 31
+; GFX10-NEXT: s_ashr_i64 s[8:9], s[30:31], s20
+; GFX10-NEXT: s_ashr_i64 s[10:11], s[30:31], s21
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
+; GFX10-NEXT: s_mov_b32 s3, s2
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
+; GFX10-NEXT: s_and_b32 s10, s23, 1
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s10, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, s29
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[28:29], s[0:1]
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
+; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX10-NEXT: s_add_u32 s0, s0, 0
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v1, s28
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_brev_b32 s23, 1
+; GFX10-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: v_mov_b32_e32 v3, s31
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s2, s2, 0
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s0, vcc_lo
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s1, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, s30
+; GFX10-NEXT: s_addc_u32 s3, s3, s23
+; GFX10-NEXT: s_add_u32 s0, s4, s12
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s3, vcc_lo
+; GFX10-NEXT: s_and_b32 s1, s1, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_addc_u32 s1, s5, s13
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s3, s[0:1], s[4:5]
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s8, s6, s14
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s3
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: v_mov_b32_e32 v7, s8
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_addc_u32 s9, s7, s15
+; GFX10-NEXT: s_cmp_eq_u64 s[8:9], s[6:7]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s3, s[8:9], s[6:7]
+; GFX10-NEXT: s_cselect_b32 s2, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v8, s9
+; GFX10-NEXT: s_and_b32 s2, 1, s2
+; GFX10-NEXT: s_cmp_eq_u64 s[14:15], 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2
+; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[12:13], 0
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s3
+; GFX10-NEXT: s_cselect_b32 s3, 1, 0
+; GFX10-NEXT: s_and_b32 s16, 1, s3
+; GFX10-NEXT: s_cmp_lt_u32 s20, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s2
+; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[14:15], 0
+; GFX10-NEXT: s_cselect_b32 s10, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s20, 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s16
+; GFX10-NEXT: s_cselect_b32 s12, 1, 0
+; GFX10-NEXT: s_lshl_b64 s[4:5], s[8:9], s22
+; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s2
+; GFX10-NEXT: s_lshr_b64 s[2:3], s[0:1], s20
+; GFX10-NEXT: s_and_b32 s13, s10, 1
+; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX10-NEXT: s_ashr_i32 s4, s9, 31
+; GFX10-NEXT: s_ashr_i64 s[6:7], s[8:9], s20
+; GFX10-NEXT: s_ashr_i64 s[10:11], s[8:9], s21
+; GFX10-NEXT: s_cmp_lg_u32 s13, 0
+; GFX10-NEXT: s_mov_b32 s5, s4
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX10-NEXT: s_and_b32 s10, s12, 1
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s10, 0
+; GFX10-NEXT: v_mov_b32_e32 v6, s1
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[0:1], s[2:3]
+; GFX10-NEXT: s_cmp_lg_u32 s13, 0
+; GFX10-NEXT: v_xor_b32_e32 v4, v5, v4
+; GFX10-NEXT: s_cselect_b64 s[4:5], s[6:7], s[4:5]
+; GFX10-NEXT: s_add_u32 s2, s2, 0
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v5, s0
+; GFX10-NEXT: s_and_b32 s6, s6, 1
+; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: s_addc_u32 s3, s3, 0
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX10-NEXT: s_and_b32 s6, s6, 1
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: s_addc_u32 s4, s4, 0
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, s2, vcc_lo
+; GFX10-NEXT: s_and_b32 s6, s6, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v6, s3, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v7, s4, vcc_lo
+; GFX10-NEXT: s_addc_u32 s1, s5, s23
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v8, s1, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: v_readfirstlane_b32 s4, v4
+; GFX10-NEXT: v_readfirstlane_b32 s5, v5
+; GFX10-NEXT: v_readfirstlane_b32 s6, v6
+; GFX10-NEXT: v_readfirstlane_b32 s7, v7
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
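
The s_saddsat_v2i128 checks above come out of the wide-type lowering
(lowerAddSubSatToAddoSubo): the 128-bit halves are added with carry, the
overflow bit is in effect reconstructed as ((lhs + rhs) <s lhs) xor
(rhs <s 0) since there is no 128-bit add-with-overflow instruction, and on
overflow the result is replaced by a sign-based clamp. As a rough sketch in
plain IR — not taken from this patch, and with a hypothetical function
name — the expansion of a single i128 saturating add is equivalent to:

; On signed overflow the wrapped sum has the opposite sign of the true
; result, so sign_splat(sum) + (1 << 127) yields INT128_MAX for positive
; overflow and INT128_MIN for negative overflow.
define i128 @saddsat_i128_sketch(i128 %lhs, i128 %rhs) {
  %pair = call { i128, i1 } @llvm.sadd.with.overflow.i128(i128 %lhs, i128 %rhs)
  %sum  = extractvalue { i128, i1 } %pair, 0
  %ov   = extractvalue { i128, i1 } %pair, 1
  %sign = ashr i128 %sum, 127               ; 0 if sum >= 0, -1 if sum < 0
  ; add the bit pattern 1 << 127 (INT128_MIN when printed signed)
  %clamp = add i128 %sign, -170141183460469231731687303715884105728
  %res  = select i1 %ov, i128 %clamp, i128 %sum
  ret i128 %res
}
declare { i128, i1 } @llvm.sadd.with.overflow.i128(i128, i128)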
+
+declare i7 @llvm.sadd.sat.i7(i7, i7) #0
+declare i8 @llvm.sadd.sat.i8(i8, i8) #0
+declare <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8>, <2 x i8>) #0
+declare <4 x i8> @llvm.sadd.sat.v4i8(<4 x i8>, <4 x i8>) #0
+
+declare i16 @llvm.sadd.sat.i16(i16, i16) #0
+declare <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16>, <2 x i16>) #0
+declare <3 x i16> @llvm.sadd.sat.v3i16(<3 x i16>, <3 x i16>) #0
+declare <4 x i16> @llvm.sadd.sat.v4i16(<4 x i16>, <4 x i16>) #0
+declare <5 x i16> @llvm.sadd.sat.v5i16(<5 x i16>, <5 x i16>) #0
+declare <6 x i16> @llvm.sadd.sat.v6i16(<6 x i16>, <6 x i16>) #0
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) #0
+
+declare i24 @llvm.sadd.sat.i24(i24, i24) #0
+
+declare i32 @llvm.sadd.sat.i32(i32, i32) #0
+declare <2 x i32> @llvm.sadd.sat.v2i32(<2 x i32>, <2 x i32>) #0
+declare <3 x i32> @llvm.sadd.sat.v3i32(<3 x i32>, <3 x i32>) #0
+declare <4 x i32> @llvm.sadd.sat.v4i32(<4 x i32>, <4 x i32>) #0
+declare <5 x i32> @llvm.sadd.sat.v5i32(<5 x i32>, <5 x i32>) #0
+declare <16 x i32> @llvm.sadd.sat.v16i32(<16 x i32>, <16 x i32>) #0
+
+declare i48 @llvm.sadd.sat.i48(i48, i48) #0
+
+declare i64 @llvm.sadd.sat.i64(i64, i64) #0
+declare <2 x i64> @llvm.sadd.sat.v2i64(<2 x i64>, <2 x i64>) #0
+
+declare i128 @llvm.sadd.sat.i128(i128, i128) #0
+declare <2 x i128> @llvm.sadd.sat.v2i128(<2 x i128>, <2 x i128>) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
new file mode 100644
index 000000000000..a7154131e3c0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/ssubsat.ll
@@ -0,0 +1,9914 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -o - %s | FileCheck -check-prefix=GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -o - %s | FileCheck -check-prefix=GFX10 %s
+
+define i7 @v_ssubsat_i7(i7 %lhs, i7 %rhs) {
+; GFX6-LABEL: v_ssubsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 25, v0
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 25, v1
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 25, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_max_i16_e32 v2, s4, v0
+; GFX8-NEXT: v_min_i16_e32 v3, s4, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX8-NEXT: v_subrev_u16_e32 v2, 0x7fff, v2
+; GFX8-NEXT: v_subrev_u16_e32 v3, 0x8000, v3
+; GFX8-NEXT: v_max_i16_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v3
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_ashrrev_i16_e32 v0, 9, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: v_max_i16_e32 v2, s4, v0
+; GFX9-NEXT: v_min_i16_e32 v3, s4, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX9-NEXT: v_subrev_u16_e32 v2, 0x7fff, v2
+; GFX9-NEXT: v_subrev_u16_e32 v3, 0x8000, v3
+; GFX9-NEXT: v_max_i16_e32 v1, v2, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v3
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_ashrrev_i16_e32 v0, 9, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 9, v0
+; GFX10-NEXT: s_mov_b32 s4, 0xffff
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 9, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_max_i16_e64 v2, v0, s4
+; GFX10-NEXT: v_min_i16_e64 v3, v0, s4
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, v2, 0x7fff
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, v3, 0x8000
+; GFX10-NEXT: v_max_i16_e64 v1, v2, v1
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v3
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_ashrrev_i16_e64 v0, 9, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i7 @llvm.ssub.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
+
+define amdgpu_ps i7 @s_ssubsat_i7(i7 inreg %lhs, i7 inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 25
+; GFX6-NEXT: s_lshl_b32 s1, s1, 25
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s3, s0, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX6-NEXT: s_cmp_gt_i32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s3
+; GFX6-NEXT: s_cselect_b32 s1, s1, s3
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 25
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s0
+; GFX8-NEXT: s_sext_i32_i16 s4, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s5, s3, s4
+; GFX8-NEXT: s_sub_i32 s5, s5, 0x7fff
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_sub_i32 s3, s3, 0x8000
+; GFX8-NEXT: s_sext_i32_i16 s4, s5
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s4, s1
+; GFX8-NEXT: s_cselect_b32 s1, s4, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_lt_i32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_ashr_i32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s3, s0
+; GFX9-NEXT: s_sext_i32_i16 s4, 0xffff
+; GFX9-NEXT: s_cmp_gt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s5, s3, s4
+; GFX9-NEXT: s_sub_i32 s5, s5, 0x7fff
+; GFX9-NEXT: s_cmp_lt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_sub_i32 s3, s3, 0x8000
+; GFX9-NEXT: s_sext_i32_i16 s4, s5
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s4, s1
+; GFX9-NEXT: s_cselect_b32 s1, s4, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_cmp_lt_i32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s1, s1, s3
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_ashr_i32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX10-NEXT: s_sext_i32_i16 s4, 0xffff
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s5, s3, s4
+; GFX10-NEXT: s_sub_i32 s5, s5, 0x7fff
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_sext_i32_i16 s4, s5
+; GFX10-NEXT: s_sub_i32 s3, s3, 0x8000
+; GFX10-NEXT: s_cmp_gt_i32 s4, s1
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_cselect_b32 s1, s4, s1
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s3
+; GFX10-NEXT: s_cselect_b32 s1, s1, s3
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_ashr_i32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i7 @llvm.ssub.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
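
The i7 tests above illustrate two things at once. First, the odd-sized type
is handled by widening: the operands are shifted left by 25 (GFX6) or 9
(GFX8+) so they occupy the most-significant bits of a 32- or 16-bit
register, the saturating operation runs at the wider width, and the result
is shifted back down with an arithmetic shift. Second, the saturating
subtract itself uses the min/max lowering (lowerAddSubSatToMinMax): rhs is
clamped into the range where a plain sub cannot wrap. A rough sketch in
plain i32 IR — not taken from this patch, and with a hypothetical function
name — of the pattern the GFX6 checks show:

define i32 @ssubsat_i32_sketch(i32 %lhs, i32 %rhs) {
  ; lo = smax(lhs, -1) - INT32_MAX: the smallest rhs that cannot make
  ; lhs - rhs exceed INT32_MAX (the smax keeps this bound from wrapping)
  %gt   = icmp sgt i32 %lhs, -1
  %maxl = select i1 %gt, i32 %lhs, i32 -1
  %lo   = sub i32 %maxl, 2147483647
  ; hi = smin(lhs, -1) - INT32_MIN: the largest rhs that cannot make
  ; lhs - rhs drop below INT32_MIN
  %lt   = icmp slt i32 %lhs, -1
  %minl = select i1 %lt, i32 %lhs, i32 -1
  %hi   = sub i32 %minl, -2147483648
  ; clamp rhs into [lo, hi]; after that the subtraction is exact
  %c1 = icmp sgt i32 %lo, %rhs
  %r1 = select i1 %c1, i32 %lo, i32 %rhs
  %c2 = icmp slt i32 %r1, %hi
  %r2 = select i1 %c2, i32 %r1, i32 %hi
  %res = sub i32 %lhs, %r2
  ret i32 %res
}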
+
+define i8 @v_ssubsat_i8(i8 %lhs, i8 %rhs) {
+; GFX6-LABEL: v_ssubsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 24, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_max_i16_e32 v2, s4, v0
+; GFX8-NEXT: v_min_i16_e32 v3, s4, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_subrev_u16_e32 v2, 0x7fff, v2
+; GFX8-NEXT: v_subrev_u16_e32 v3, 0x8000, v3
+; GFX8-NEXT: v_max_i16_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v3
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_ashrrev_i16_e32 v0, 8, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: v_max_i16_e32 v2, s4, v0
+; GFX9-NEXT: v_min_i16_e32 v3, s4, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_subrev_u16_e32 v2, 0x7fff, v2
+; GFX9-NEXT: v_subrev_u16_e32 v3, 0x8000, v3
+; GFX9-NEXT: v_max_i16_e32 v1, v2, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v3
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_ashrrev_i16_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: s_mov_b32 s4, 0xffff
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_max_i16_e64 v2, v0, s4
+; GFX10-NEXT: v_min_i16_e64 v3, v0, s4
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, v2, 0x7fff
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, v3, 0x8000
+; GFX10-NEXT: v_max_i16_e64 v1, v2, v1
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v3
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_ashrrev_i16_e64 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i8 @llvm.ssub.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define amdgpu_ps i8 @s_ssubsat_i8(i8 inreg %lhs, i8 inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s3, s0, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX6-NEXT: s_cmp_gt_i32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s3
+; GFX6-NEXT: s_cselect_b32 s1, s1, s3
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 24
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s0
+; GFX8-NEXT: s_sext_i32_i16 s4, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s5, s3, s4
+; GFX8-NEXT: s_sub_i32 s5, s5, 0x7fff
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_sub_i32 s3, s3, 0x8000
+; GFX8-NEXT: s_sext_i32_i16 s4, s5
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s4, s1
+; GFX8-NEXT: s_cselect_b32 s1, s4, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_lt_i32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_ashr_i32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s3, s0
+; GFX9-NEXT: s_sext_i32_i16 s4, 0xffff
+; GFX9-NEXT: s_cmp_gt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s5, s3, s4
+; GFX9-NEXT: s_sub_i32 s5, s5, 0x7fff
+; GFX9-NEXT: s_cmp_lt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_sub_i32 s3, s3, 0x8000
+; GFX9-NEXT: s_sext_i32_i16 s4, s5
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s4, s1
+; GFX9-NEXT: s_cselect_b32 s1, s4, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_cmp_lt_i32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s1, s1, s3
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_ashr_i32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: s_sext_i32_i16 s4, 0xffff
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s5, s3, s4
+; GFX10-NEXT: s_sub_i32 s5, s5, 0x7fff
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_sext_i32_i16 s4, s5
+; GFX10-NEXT: s_sub_i32 s3, s3, 0x8000
+; GFX10-NEXT: s_cmp_gt_i32 s4, s1
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_cselect_b32 s1, s4, s1
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s3
+; GFX10-NEXT: s_cselect_b32 s1, s1, s3
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_ashr_i32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i8 @llvm.ssub.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define i16 @v_ssubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
+; GFX6-LABEL: v_ssubsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v4, -1, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v5, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: v_max_i32_e32 v1, v4, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v5
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_max_i32_e32 v3, -1, v1
+; GFX6-NEXT: v_min_i32_e32 v4, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_max_i32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 24, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_mov_b32 s6, 0xffff
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v4, s6, v0
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_subrev_u16_e32 v4, s4, v4
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v5, s6, v0
+; GFX8-NEXT: v_max_i16_e32 v1, v4, v1
+; GFX8-NEXT: v_subrev_u16_e32 v5, s5, v5
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v5
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_max_i16_e32 v1, s6, v3
+; GFX8-NEXT: v_subrev_u16_e32 v1, s4, v1
+; GFX8-NEXT: v_min_i16_e32 v4, s6, v3
+; GFX8-NEXT: v_max_i16_e32 v1, v1, v2
+; GFX8-NEXT: v_subrev_u16_e32 v4, s5, v4
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v4
+; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX8-NEXT: v_sub_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_mov_b32 s6, 0xffff
+; GFX9-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_i16_e32 v4, s6, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_subrev_u16_e32 v4, s4, v4
+; GFX9-NEXT: s_mov_b32 s5, 0x8000
+; GFX9-NEXT: v_min_i16_e32 v5, s6, v0
+; GFX9-NEXT: v_max_i16_e32 v1, v4, v1
+; GFX9-NEXT: v_subrev_u16_e32 v5, s5, v5
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v5
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_max_i16_e32 v1, s6, v2
+; GFX9-NEXT: v_subrev_u16_e32 v1, s4, v1
+; GFX9-NEXT: v_min_i16_e32 v4, s6, v2
+; GFX9-NEXT: v_subrev_u16_e32 v4, s5, v4
+; GFX9-NEXT: v_max_i16_e32 v1, v1, v3
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v4
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_sub_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_and_b32_sdwa v0, sext(v0), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v1, sext(v1), s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: v_lshlrev_b16_e64 v2, 8, v0
+; GFX10-NEXT: v_lshrrev_b32_sdwa v0, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: s_mov_b32 s6, 0xffff
+; GFX10-NEXT: s_movk_i32 s5, 0x7fff
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_max_i16_e64 v4, v2, s6
+; GFX10-NEXT: v_max_i16_e64 v5, v0, s6
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: v_min_i16_e64 v6, v2, s6
+; GFX10-NEXT: v_min_i16_e64 v7, v0, s6
+; GFX10-NEXT: v_sub_nc_u16_e64 v4, v4, s5
+; GFX10-NEXT: v_sub_nc_u16_e64 v5, v5, s5
+; GFX10-NEXT: s_mov_b32 s4, 0x8000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u16_e64 v6, v6, s4
+; GFX10-NEXT: v_max_i16_e64 v1, v4, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v7, v7, s4
+; GFX10-NEXT: v_max_i16_e64 v10, v5, v3
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v6
+; GFX10-NEXT: v_min_i16_e64 v3, v10, v7
+; GFX10-NEXT: v_sub_nc_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v3
+; GFX10-NEXT: v_and_b32_sdwa v1, sext(v1), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v0, sext(v0), s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
+
+define amdgpu_ps i16 @s_ssubsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
+; GFX6-LABEL: s_ssubsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshr_b32 s3, s1, 8
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: s_cselect_b32 s6, s0, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s4
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s7, s0, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s5
+; GFX6-NEXT: s_cmp_gt_i32 s6, s1
+; GFX6-NEXT: s_cselect_b32 s1, s6, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s7
+; GFX6-NEXT: s_cselect_b32 s1, s1, s7
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_ashr_i32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s3, s1, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s4, s1, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s5
+; GFX6-NEXT: s_cmp_gt_i32 s3, s2
+; GFX6-NEXT: s_cselect_b32 s2, s3, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s4
+; GFX6-NEXT: s_cselect_b32 s2, s2, s4
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: s_movk_i32 s2, 0xff
+; GFX6-NEXT: s_ashr_i32 s1, s1, 24
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshl_b32 s0, s0, s4
+; GFX8-NEXT: s_lshr_b32 s3, s1, 8
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_sext_i32_i16 s7, s0
+; GFX8-NEXT: s_sext_i32_i16 s8, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s7, s8
+; GFX8-NEXT: s_movk_i32 s5, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s9, s7, s8
+; GFX8-NEXT: s_sub_i32 s9, s9, s5
+; GFX8-NEXT: s_cmp_lt_i32 s7, s8
+; GFX8-NEXT: s_mov_b32 s6, 0x8000
+; GFX8-NEXT: s_cselect_b32 s7, s7, s8
+; GFX8-NEXT: s_sub_i32 s7, s7, s6
+; GFX8-NEXT: s_sext_i32_i16 s9, s9
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s9, s1
+; GFX8-NEXT: s_cselect_b32 s1, s9, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s7, s7
+; GFX8-NEXT: s_cmp_lt_i32 s1, s7
+; GFX8-NEXT: s_cselect_b32 s1, s1, s7
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_lshl_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s2, s3, s4
+; GFX8-NEXT: s_ashr_i32 s0, s0, s4
+; GFX8-NEXT: s_sext_i32_i16 s3, s1
+; GFX8-NEXT: s_cmp_gt_i32 s3, s8
+; GFX8-NEXT: s_cselect_b32 s7, s3, s8
+; GFX8-NEXT: s_sub_i32 s5, s7, s5
+; GFX8-NEXT: s_cmp_lt_i32 s3, s8
+; GFX8-NEXT: s_cselect_b32 s3, s3, s8
+; GFX8-NEXT: s_sub_i32 s3, s3, s6
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_gt_i32 s5, s2
+; GFX8-NEXT: s_cselect_b32 s2, s5, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_lt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_sub_i32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_movk_i32 s2, 0xff
+; GFX8-NEXT: s_ashr_i32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshl_b32 s0, s0, s4
+; GFX9-NEXT: s_lshr_b32 s3, s1, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_sext_i32_i16 s7, s0
+; GFX9-NEXT: s_sext_i32_i16 s8, 0xffff
+; GFX9-NEXT: s_cmp_gt_i32 s7, s8
+; GFX9-NEXT: s_movk_i32 s5, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s9, s7, s8
+; GFX9-NEXT: s_sub_i32 s9, s9, s5
+; GFX9-NEXT: s_cmp_lt_i32 s7, s8
+; GFX9-NEXT: s_mov_b32 s6, 0x8000
+; GFX9-NEXT: s_cselect_b32 s7, s7, s8
+; GFX9-NEXT: s_sub_i32 s7, s7, s6
+; GFX9-NEXT: s_sext_i32_i16 s9, s9
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s9, s1
+; GFX9-NEXT: s_cselect_b32 s1, s9, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s7, s7
+; GFX9-NEXT: s_cmp_lt_i32 s1, s7
+; GFX9-NEXT: s_cselect_b32 s1, s1, s7
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_lshl_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s2, s3, s4
+; GFX9-NEXT: s_ashr_i32 s0, s0, s4
+; GFX9-NEXT: s_sext_i32_i16 s3, s1
+; GFX9-NEXT: s_cmp_gt_i32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s7, s3, s8
+; GFX9-NEXT: s_sub_i32 s5, s7, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s3, s3, s8
+; GFX9-NEXT: s_sub_i32 s3, s3, s6
+; GFX9-NEXT: s_sext_i32_i16 s5, s5
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_cmp_gt_i32 s5, s2
+; GFX9-NEXT: s_cselect_b32 s2, s5, s2
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_cmp_lt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_sub_i32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_movk_i32 s2, 0xff
+; GFX9-NEXT: s_ashr_i32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s1, s1, s2
+; GFX9-NEXT: s_and_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s3, s0, 8
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_sext_i32_i16 s6, 0xffff
+; GFX10-NEXT: s_sext_i32_i16 s5, s0
+; GFX10-NEXT: s_lshr_b32 s4, s1, 8
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_cmp_gt_i32 s5, s6
+; GFX10-NEXT: s_movk_i32 s7, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s8, s5, s6
+; GFX10-NEXT: s_mov_b32 s9, 0x8000
+; GFX10-NEXT: s_sub_i32 s8, s8, s7
+; GFX10-NEXT: s_cmp_lt_i32 s5, s6
+; GFX10-NEXT: s_sext_i32_i16 s8, s8
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_sub_i32 s5, s5, s9
+; GFX10-NEXT: s_cmp_gt_i32 s8, s1
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cselect_b32 s1, s8, s1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s5
+; GFX10-NEXT: s_cselect_b32 s1, s1, s5
+; GFX10-NEXT: s_lshl_b32 s3, s3, s2
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s4, s2
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_sext_i32_i16 s4, s3
+; GFX10-NEXT: s_ashr_i32 s0, s0, s2
+; GFX10-NEXT: s_cmp_gt_i32 s4, s6
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cselect_b32 s5, s4, s6
+; GFX10-NEXT: s_sub_i32 s5, s5, s7
+; GFX10-NEXT: s_cmp_lt_i32 s4, s6
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cselect_b32 s4, s4, s6
+; GFX10-NEXT: s_sub_i32 s4, s4, s9
+; GFX10-NEXT: s_cmp_gt_i32 s5, s1
+; GFX10-NEXT: s_sext_i32_i16 s4, s4
+; GFX10-NEXT: s_cselect_b32 s1, s5, s1
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s4
+; GFX10-NEXT: s_cselect_b32 s1, s1, s4
+; GFX10-NEXT: s_sub_i32 s1, s3, s1
+; GFX10-NEXT: s_movk_i32 s3, 0xff
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_and_b32 s0, s0, s3
+; GFX10-NEXT: s_ashr_i32 s1, s1, s2
+; GFX10-NEXT: s_and_b32 s1, s1, s3
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
+
+define i32 @v_ssubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
+; GFX6-LABEL: v_ssubsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v8, -1, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, s4, v8
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: v_max_i32_e32 v1, v8, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v10
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v5
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_min_i32_e32 v8, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_max_i32_e32 v2, v5, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v8
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_bfrev_b32_e32 v9, -2
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v6
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v2
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v9
+; GFX6-NEXT: v_subrev_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_max_i32_e32 v3, v5, v3
+; GFX6-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v4
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v3
+; GFX6-NEXT: v_mov_b32_e32 v11, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 24, v1
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 24, v7
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v9
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v11
+; GFX6-NEXT: v_max_i32_e32 v4, v5, v4
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v6
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 24, v2
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 24, v3
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_mov_b32 s6, 0xffff
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v9, s6, v0
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_subrev_u16_e32 v9, s4, v9
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v11, s6, v0
+; GFX8-NEXT: v_max_i16_e32 v1, v9, v1
+; GFX8-NEXT: v_subrev_u16_e32 v11, s5, v11
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v11
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_max_i16_e32 v1, s6, v3
+; GFX8-NEXT: v_subrev_u16_e32 v1, s4, v1
+; GFX8-NEXT: v_min_i16_e32 v9, s6, v3
+; GFX8-NEXT: v_max_i16_e32 v1, v1, v2
+; GFX8-NEXT: v_subrev_u16_e32 v9, s5, v9
+; GFX8-NEXT: v_mov_b32_e32 v8, 0xffff
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v4
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v9
+; GFX8-NEXT: v_mov_b32_e32 v10, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v4, v2, v8
+; GFX8-NEXT: v_sub_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX8-NEXT: v_min_i16_e32 v6, v2, v8
+; GFX8-NEXT: v_sub_u16_e32 v4, v4, v10
+; GFX8-NEXT: v_max_i16_e32 v3, v4, v3
+; GFX8-NEXT: v_subrev_u16_e32 v6, s5, v6
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v6
+; GFX8-NEXT: v_sub_u16_e32 v2, v2, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v5
+; GFX8-NEXT: v_max_i16_e32 v5, v3, v8
+; GFX8-NEXT: v_min_i16_e32 v6, v3, v8
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_sub_u16_e32 v5, v5, v10
+; GFX8-NEXT: v_subrev_u16_e32 v6, 0x8000, v6
+; GFX8-NEXT: v_max_i16_e32 v4, v5, v4
+; GFX8-NEXT: v_min_i16_e32 v4, v4, v6
+; GFX8-NEXT: v_sub_u16_e32 v3, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v1), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, sext(v0), v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v2), v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, sext(v3), v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_mov_b32 s6, 0xffff
+; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: v_max_i16_e32 v9, s6, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_subrev_u16_e32 v9, s4, v9
+; GFX9-NEXT: s_mov_b32 s5, 0x8000
+; GFX9-NEXT: v_min_i16_e32 v11, s6, v0
+; GFX9-NEXT: v_max_i16_e32 v1, v9, v1
+; GFX9-NEXT: v_subrev_u16_e32 v11, s5, v11
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v11
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_max_i16_e32 v1, s6, v2
+; GFX9-NEXT: v_subrev_u16_e32 v1, s4, v1
+; GFX9-NEXT: v_min_i16_e32 v9, s6, v2
+; GFX9-NEXT: v_subrev_u16_e32 v9, s5, v9
+; GFX9-NEXT: v_max_i16_e32 v1, v1, v5
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v9
+; GFX9-NEXT: v_sub_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v3
+; GFX9-NEXT: v_mov_b32_e32 v8, 0xffff
+; GFX9-NEXT: v_mov_b32_e32 v10, 0x7fff
+; GFX9-NEXT: v_max_i16_e32 v5, v2, v8
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX9-NEXT: v_min_i16_e32 v6, v2, v8
+; GFX9-NEXT: v_sub_u16_e32 v5, v5, v10
+; GFX9-NEXT: v_subrev_u16_e32 v6, s5, v6
+; GFX9-NEXT: v_max_i16_e32 v3, v5, v3
+; GFX9-NEXT: v_min_i16_e32 v3, v3, v6
+; GFX9-NEXT: v_sub_u16_e32 v2, v2, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v4
+; GFX9-NEXT: v_max_i16_e32 v5, v3, v8
+; GFX9-NEXT: v_min_i16_e32 v6, v3, v8
+; GFX9-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX9-NEXT: v_sub_u16_e32 v5, v5, v10
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_subrev_u16_e32 v6, 0x8000, v6
+; GFX9-NEXT: v_max_i16_e32 v4, v5, v4
+; GFX9-NEXT: v_and_b32_sdwa v1, sext(v1), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_i16_e32 v4, v4, v6
+; GFX9-NEXT: v_ashrrev_i16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_sub_u16_e32 v3, v3, v4
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, sext(v2), s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v2, sext(v3), s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: s_mov_b32 s5, 16
+; GFX10-NEXT: s_mov_b32 s6, 24
+; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, s5, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v19, s6, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: s_mov_b32 s4, 0xffff
+; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: s_movk_i32 s5, 0x7fff
+; GFX10-NEXT: v_max_i16_e64 v8, v0, s4
+; GFX10-NEXT: v_lshrrev_b32_sdwa v7, s6, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_max_i16_e64 v9, v2, s4
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: v_min_i16_e64 v10, v0, s4
+; GFX10-NEXT: v_sub_nc_u16_e64 v8, v8, s5
+; GFX10-NEXT: s_mov_b32 s6, 0x8000
+; GFX10-NEXT: v_sub_nc_u16_e64 v15, v9, s5
+; GFX10-NEXT: v_min_i16_e64 v11, v2, s4
+; GFX10-NEXT: v_mov_b32_e32 v12, 0xffff
+; GFX10-NEXT: v_max_i16_e64 v1, v8, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v10, v10, s6
+; GFX10-NEXT: v_max_i16_e64 v5, v15, v5
+; GFX10-NEXT: v_sub_nc_u16_e64 v8, v11, s6
+; GFX10-NEXT: v_mov_b32_e32 v9, 0x7fff
+; GFX10-NEXT: v_max_i16_e64 v11, v3, v12
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v10
+; GFX10-NEXT: v_max_i16_e64 v10, v19, v12
+; GFX10-NEXT: v_min_i16_e64 v5, v5, v8
+; GFX10-NEXT: v_min_i16_e64 v8, v3, v12
+; GFX10-NEXT: v_sub_nc_u16_e64 v11, v11, v9
+; GFX10-NEXT: v_min_i16_e64 v12, v19, v12
+; GFX10-NEXT: v_sub_nc_u16_e64 v9, v10, v9
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, v2, v5
+; GFX10-NEXT: v_sub_nc_u16_e64 v5, v8, s6
+; GFX10-NEXT: v_max_i16_e64 v6, v11, v6
+; GFX10-NEXT: v_sub_nc_u16_e64 v8, v12, 0x8000
+; GFX10-NEXT: v_max_i16_e64 v7, v9, v7
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_and_b32_sdwa v1, sext(v2), s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_min_i16_e64 v2, v6, v5
+; GFX10-NEXT: v_min_i16_e64 v5, v7, v8
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_ashrrev_i16_e64 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, v3, v2
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, v19, v5
+; GFX10-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX10-NEXT: v_and_b32_sdwa v1, sext(v2), s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v2, sext(v3), s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
+
+define amdgpu_ps i32 @s_ssubsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
+; GFX6-LABEL: s_ssubsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshr_b32 s3, s0, 16
+; GFX6-NEXT: s_lshr_b32 s4, s0, 24
+; GFX6-NEXT: s_lshr_b32 s5, s1, 8
+; GFX6-NEXT: s_lshr_b32 s6, s1, 16
+; GFX6-NEXT: s_lshr_b32 s7, s1, 24
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s8, -2
+; GFX6-NEXT: s_cselect_b32 s10, s0, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s9, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s11, s0, -1
+; GFX6-NEXT: s_sub_i32 s11, s11, s9
+; GFX6-NEXT: s_cmp_gt_i32 s10, s1
+; GFX6-NEXT: s_cselect_b32 s1, s10, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s11
+; GFX6-NEXT: s_cselect_b32 s1, s1, s11
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_ashr_i32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s2, s5, 24
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s5, s1, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s10, s1, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s9
+; GFX6-NEXT: s_cmp_gt_i32 s5, s2
+; GFX6-NEXT: s_cselect_b32 s2, s5, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s10
+; GFX6-NEXT: s_cselect_b32 s2, s2, s10
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_ashr_i32 s1, s1, 24
+; GFX6-NEXT: s_lshl_b32 s3, s6, 24
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s5, s2, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s6, s2, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s9
+; GFX6-NEXT: s_cmp_gt_i32 s5, s3
+; GFX6-NEXT: s_cselect_b32 s3, s5, s3
+; GFX6-NEXT: s_cmp_lt_i32 s3, s6
+; GFX6-NEXT: s_cselect_b32 s3, s3, s6
+; GFX6-NEXT: s_sub_i32 s2, s2, s3
+; GFX6-NEXT: s_lshl_b32 s3, s4, 24
+; GFX6-NEXT: s_ashr_i32 s2, s2, 24
+; GFX6-NEXT: s_lshl_b32 s4, s7, 24
+; GFX6-NEXT: s_cmp_gt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s5, s3, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_cmp_lt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s6, s3, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s9
+; GFX6-NEXT: s_cmp_gt_i32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s6
+; GFX6-NEXT: s_cselect_b32 s4, s4, s6
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_ashr_i32 s3, s3, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s3, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshr_b32 s3, s0, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 24
+; GFX8-NEXT: s_lshl_b32 s0, s0, s8
+; GFX8-NEXT: s_lshr_b32 s5, s1, 8
+; GFX8-NEXT: s_lshr_b32 s6, s1, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 24
+; GFX8-NEXT: s_lshl_b32 s1, s1, s8
+; GFX8-NEXT: s_sext_i32_i16 s11, s0
+; GFX8-NEXT: s_sext_i32_i16 s12, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s11, s12
+; GFX8-NEXT: s_movk_i32 s9, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s13, s11, s12
+; GFX8-NEXT: s_sub_i32 s13, s13, s9
+; GFX8-NEXT: s_cmp_lt_i32 s11, s12
+; GFX8-NEXT: s_mov_b32 s10, 0x8000
+; GFX8-NEXT: s_cselect_b32 s11, s11, s12
+; GFX8-NEXT: s_sub_i32 s11, s11, s10
+; GFX8-NEXT: s_sext_i32_i16 s13, s13
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s13, s1
+; GFX8-NEXT: s_cselect_b32 s1, s13, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s11, s11
+; GFX8-NEXT: s_cmp_lt_i32 s1, s11
+; GFX8-NEXT: s_cselect_b32 s1, s1, s11
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s0, s0
+; GFX8-NEXT: s_lshl_b32 s1, s2, s8
+; GFX8-NEXT: s_lshl_b32 s2, s5, s8
+; GFX8-NEXT: s_ashr_i32 s0, s0, s8
+; GFX8-NEXT: s_sext_i32_i16 s5, s1
+; GFX8-NEXT: s_cmp_gt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s11, s5, s12
+; GFX8-NEXT: s_sub_i32 s11, s11, s9
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_sub_i32 s5, s5, s10
+; GFX8-NEXT: s_sext_i32_i16 s11, s11
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_gt_i32 s11, s2
+; GFX8-NEXT: s_cselect_b32 s2, s11, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s2, s5
+; GFX8-NEXT: s_cselect_b32 s2, s2, s5
+; GFX8-NEXT: s_sub_i32 s1, s1, s2
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_lshl_b32 s2, s3, s8
+; GFX8-NEXT: s_lshl_b32 s3, s6, s8
+; GFX8-NEXT: s_ashr_i32 s1, s1, s8
+; GFX8-NEXT: s_sext_i32_i16 s5, s2
+; GFX8-NEXT: s_cmp_gt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s6, s5, s12
+; GFX8-NEXT: s_sub_i32 s6, s6, s9
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_sub_i32 s5, s5, s10
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s6, s3
+; GFX8-NEXT: s_cselect_b32 s3, s6, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s3, s5
+; GFX8-NEXT: s_cselect_b32 s3, s3, s5
+; GFX8-NEXT: s_sub_i32 s2, s2, s3
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_lshl_b32 s3, s4, s8
+; GFX8-NEXT: s_lshl_b32 s4, s7, s8
+; GFX8-NEXT: s_ashr_i32 s2, s2, s8
+; GFX8-NEXT: s_sext_i32_i16 s5, s3
+; GFX8-NEXT: s_cmp_gt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s6, s5, s12
+; GFX8-NEXT: s_sub_i32 s6, s6, s9
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_sub_i32 s5, s5, s10
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_gt_i32 s6, s4
+; GFX8-NEXT: s_cselect_b32 s4, s6, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_sub_i32 s3, s3, s4
+; GFX8-NEXT: s_movk_i32 s4, 0xff
+; GFX8-NEXT: s_and_b32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s0, s0, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 8
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_ashr_i32 s3, s3, s8
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s3, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 24
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshr_b32 s3, s0, 16
+; GFX9-NEXT: s_lshr_b32 s4, s0, 24
+; GFX9-NEXT: s_lshl_b32 s0, s0, s8
+; GFX9-NEXT: s_lshr_b32 s5, s1, 8
+; GFX9-NEXT: s_lshr_b32 s6, s1, 16
+; GFX9-NEXT: s_lshr_b32 s7, s1, 24
+; GFX9-NEXT: s_lshl_b32 s1, s1, s8
+; GFX9-NEXT: s_sext_i32_i16 s11, s0
+; GFX9-NEXT: s_sext_i32_i16 s12, 0xffff
+; GFX9-NEXT: s_cmp_gt_i32 s11, s12
+; GFX9-NEXT: s_movk_i32 s9, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s13, s11, s12
+; GFX9-NEXT: s_sub_i32 s13, s13, s9
+; GFX9-NEXT: s_cmp_lt_i32 s11, s12
+; GFX9-NEXT: s_mov_b32 s10, 0x8000
+; GFX9-NEXT: s_cselect_b32 s11, s11, s12
+; GFX9-NEXT: s_sub_i32 s11, s11, s10
+; GFX9-NEXT: s_sext_i32_i16 s13, s13
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s13, s1
+; GFX9-NEXT: s_cselect_b32 s1, s13, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s11, s11
+; GFX9-NEXT: s_cmp_lt_i32 s1, s11
+; GFX9-NEXT: s_cselect_b32 s1, s1, s11
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_sext_i32_i16 s0, s0
+; GFX9-NEXT: s_lshl_b32 s1, s2, s8
+; GFX9-NEXT: s_lshl_b32 s2, s5, s8
+; GFX9-NEXT: s_ashr_i32 s0, s0, s8
+; GFX9-NEXT: s_sext_i32_i16 s5, s1
+; GFX9-NEXT: s_cmp_gt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s11, s5, s12
+; GFX9-NEXT: s_sub_i32 s11, s11, s9
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_sub_i32 s5, s5, s10
+; GFX9-NEXT: s_sext_i32_i16 s11, s11
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_cmp_gt_i32 s11, s2
+; GFX9-NEXT: s_cselect_b32 s2, s11, s2
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_sext_i32_i16 s5, s5
+; GFX9-NEXT: s_cmp_lt_i32 s2, s5
+; GFX9-NEXT: s_cselect_b32 s2, s2, s5
+; GFX9-NEXT: s_sub_i32 s1, s1, s2
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_lshl_b32 s2, s3, s8
+; GFX9-NEXT: s_lshl_b32 s3, s6, s8
+; GFX9-NEXT: s_ashr_i32 s1, s1, s8
+; GFX9-NEXT: s_sext_i32_i16 s5, s2
+; GFX9-NEXT: s_cmp_gt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s6, s5, s12
+; GFX9-NEXT: s_sub_i32 s6, s6, s9
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_sub_i32 s5, s5, s10
+; GFX9-NEXT: s_sext_i32_i16 s6, s6
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_cmp_gt_i32 s6, s3
+; GFX9-NEXT: s_cselect_b32 s3, s6, s3
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_sext_i32_i16 s5, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s3, s3, s5
+; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_lshl_b32 s3, s4, s8
+; GFX9-NEXT: s_lshl_b32 s4, s7, s8
+; GFX9-NEXT: s_ashr_i32 s2, s2, s8
+; GFX9-NEXT: s_sext_i32_i16 s5, s3
+; GFX9-NEXT: s_cmp_gt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s6, s5, s12
+; GFX9-NEXT: s_sub_i32 s6, s6, s9
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_sub_i32 s5, s5, s10
+; GFX9-NEXT: s_sext_i32_i16 s6, s6
+; GFX9-NEXT: s_sext_i32_i16 s4, s4
+; GFX9-NEXT: s_cmp_gt_i32 s6, s4
+; GFX9-NEXT: s_cselect_b32 s4, s6, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s4
+; GFX9-NEXT: s_sext_i32_i16 s5, s5
+; GFX9-NEXT: s_cmp_lt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_sub_i32 s3, s3, s4
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: s_and_b32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s0, s0, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_sext_i32_i16 s3, s3
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, s8
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s3, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 24
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s6, 8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s2, s0, 8
+; GFX10-NEXT: s_lshr_b32 s3, s0, 16
+; GFX10-NEXT: s_lshr_b32 s4, s0, 24
+; GFX10-NEXT: s_lshl_b32 s0, s0, s6
+; GFX10-NEXT: s_sext_i32_i16 s10, 0xffff
+; GFX10-NEXT: s_sext_i32_i16 s9, s0
+; GFX10-NEXT: s_lshr_b32 s5, s1, 8
+; GFX10-NEXT: s_lshr_b32 s7, s1, 16
+; GFX10-NEXT: s_lshr_b32 s8, s1, 24
+; GFX10-NEXT: s_lshl_b32 s1, s1, s6
+; GFX10-NEXT: s_cmp_gt_i32 s9, s10
+; GFX10-NEXT: s_movk_i32 s11, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s12, s9, s10
+; GFX10-NEXT: s_mov_b32 s13, 0x8000
+; GFX10-NEXT: s_sub_i32 s12, s12, s11
+; GFX10-NEXT: s_cmp_lt_i32 s9, s10
+; GFX10-NEXT: s_sext_i32_i16 s12, s12
+; GFX10-NEXT: s_cselect_b32 s9, s9, s10
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_sub_i32 s9, s9, s13
+; GFX10-NEXT: s_cmp_gt_i32 s12, s1
+; GFX10-NEXT: s_sext_i32_i16 s9, s9
+; GFX10-NEXT: s_cselect_b32 s1, s12, s1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s9
+; GFX10-NEXT: s_cselect_b32 s1, s1, s9
+; GFX10-NEXT: s_lshl_b32 s5, s5, s6
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s2, s6
+; GFX10-NEXT: s_sext_i32_i16 s0, s0
+; GFX10-NEXT: s_sext_i32_i16 s2, s1
+; GFX10-NEXT: s_ashr_i32 s0, s0, s6
+; GFX10-NEXT: s_cmp_gt_i32 s2, s10
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cselect_b32 s9, s2, s10
+; GFX10-NEXT: s_sub_i32 s9, s9, s11
+; GFX10-NEXT: s_cmp_lt_i32 s2, s10
+; GFX10-NEXT: s_sext_i32_i16 s9, s9
+; GFX10-NEXT: s_cselect_b32 s2, s2, s10
+; GFX10-NEXT: s_sub_i32 s2, s2, s13
+; GFX10-NEXT: s_cmp_gt_i32 s9, s5
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cselect_b32 s5, s9, s5
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cmp_lt_i32 s5, s2
+; GFX10-NEXT: s_cselect_b32 s2, s5, s2
+; GFX10-NEXT: s_lshl_b32 s3, s3, s6
+; GFX10-NEXT: s_sub_i32 s1, s1, s2
+; GFX10-NEXT: s_sext_i32_i16 s5, s3
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_lshl_b32 s2, s7, s6
+; GFX10-NEXT: s_ashr_i32 s1, s1, s6
+; GFX10-NEXT: s_cmp_gt_i32 s5, s10
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cselect_b32 s7, s5, s10
+; GFX10-NEXT: s_sub_i32 s7, s7, s11
+; GFX10-NEXT: s_cmp_lt_i32 s5, s10
+; GFX10-NEXT: s_sext_i32_i16 s7, s7
+; GFX10-NEXT: s_cselect_b32 s5, s5, s10
+; GFX10-NEXT: s_sub_i32 s5, s5, s13
+; GFX10-NEXT: s_cmp_gt_i32 s7, s2
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cselect_b32 s2, s7, s2
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cmp_lt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_lshl_b32 s4, s4, s6
+; GFX10-NEXT: s_sub_i32 s2, s3, s2
+; GFX10-NEXT: s_sext_i32_i16 s5, s4
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_lshl_b32 s3, s8, s6
+; GFX10-NEXT: s_ashr_i32 s2, s2, s6
+; GFX10-NEXT: s_cmp_gt_i32 s5, s10
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_cselect_b32 s7, s5, s10
+; GFX10-NEXT: s_sub_i32 s7, s7, s11
+; GFX10-NEXT: s_cmp_lt_i32 s5, s10
+; GFX10-NEXT: s_sext_i32_i16 s7, s7
+; GFX10-NEXT: s_cselect_b32 s5, s5, s10
+; GFX10-NEXT: s_sub_i32 s5, s5, s13
+; GFX10-NEXT: s_cmp_gt_i32 s7, s3
+; GFX10-NEXT: s_sext_i32_i16 s5, s5
+; GFX10-NEXT: s_cselect_b32 s3, s7, s3
+; GFX10-NEXT: s_movk_i32 s7, 0xff
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_cmp_lt_i32 s3, s5
+; GFX10-NEXT: s_cselect_b32 s3, s3, s5
+; GFX10-NEXT: s_and_b32 s1, s1, s7
+; GFX10-NEXT: s_sub_i32 s3, s4, s3
+; GFX10-NEXT: s_and_b32 s0, s0, s7
+; GFX10-NEXT: s_sext_i32_i16 s3, s3
+; GFX10-NEXT: s_lshl_b32 s1, s1, 8
+; GFX10-NEXT: s_and_b32 s2, s2, s7
+; GFX10-NEXT: s_ashr_i32 s3, s3, s6
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s2, 16
+; GFX10-NEXT: s_and_b32 s2, s3, s7
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: s_lshl_b32 s1, s2, 24
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
+
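The v4i8 cases above are not handled as native byte operations: the legalizer splits the packed i32 into four lanes and left-justifies each lane in a wider register (the top byte of an i32 on GFX6, the top byte of an i16 on GFX8 onward) so that the wide type's saturation limits coincide with the i8 limits, then expands the wide saturate with the min/max recipe shown further down for plain i32. A minimal IR sketch of one lane under that reading (the function name is illustrative, not part of this commit):

define i8 @ssubsat_i8_via_i32_sketch(i8 %a, i8 %b) {
  ; left-justify both operands so bit 31 is the lane's sign bit
  %a32 = zext i8 %a to i32
  %b32 = zext i8 %b to i32
  %ahi = shl i32 %a32, 24
  %bhi = shl i32 %b32, 24
  ; the wide saturating subtract now saturates at exactly the i8 limits << 24
  %sat = call i32 @llvm.ssub.sat.i32(i32 %ahi, i32 %bhi)
  ; shift the result back down, preserving the sign
  %lo = ashr i32 %sat, 24
  %res = trunc i32 %lo to i8
  ret i8 %res
}
declare i32 @llvm.ssub.sat.i32(i32, i32)

The shl/ashr pairs and the and/or repacking in the checks above are this pattern applied to all four lanes.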
+define i24 @v_ssubsat_i24(i24 %lhs, i24 %rhs) {
+; GFX6-LABEL: v_ssubsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 8, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v0, v1
+; GFX8-NEXT: v_bfe_i32 v3, v2, 0, 24
+; GFX8-NEXT: v_bfe_i32 v0, v0, 0, 24
+; GFX8-NEXT: v_cmp_lt_i32_e64 s[4:5], v3, v0
+; GFX8-NEXT: v_bfe_i32 v0, v1, 0, 24
+; GFX8-NEXT: s_bfe_i32 s6, 0, 0x180000
+; GFX8-NEXT: v_cmp_lt_i32_e64 s[6:7], s6, v0
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 23, v3
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0xff800000, v0
+; GFX8-NEXT: s_xor_b64 vcc, s[6:7], s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX9-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_subrev_u32_e32 v2, 0x7fffffff, v2
+; GFX9-NEXT: v_subrev_u32_e32 v3, 0x80000000, v3
+; GFX9-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX10-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 0x7fffffff, v2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 0x80000000, v3
+; GFX10-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: v_ashrrev_i32_e32 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i24 @llvm.ssub.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
+
+define amdgpu_ps i24 @s_ssubsat_i24(i24 inreg %lhs, i24 inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s3, s0, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX6-NEXT: s_cmp_gt_i32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s3
+; GFX6-NEXT: s_cselect_b32 s1, s1, s3
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 8
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_i32 s2, s0, s1
+; GFX8-NEXT: s_bfe_i32 s3, s2, 0x180000
+; GFX8-NEXT: s_bfe_i32 s0, s0, 0x180000
+; GFX8-NEXT: s_cmp_lt_i32 s3, s0
+; GFX8-NEXT: s_cselect_b32 s0, 1, 0
+; GFX8-NEXT: s_bfe_i32 s1, s1, 0x180000
+; GFX8-NEXT: s_bfe_i32 s4, 0, 0x180000
+; GFX8-NEXT: s_cmp_gt_i32 s1, s4
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_xor_b32 s0, s1, s0
+; GFX8-NEXT: s_ashr_i32 s1, s3, 23
+; GFX8-NEXT: s_add_i32 s1, s1, 0xff800000
+; GFX8-NEXT: s_and_b32 s0, s0, 1
+; GFX8-NEXT: s_cmp_lg_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s0, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_lshl_b32 s0, s0, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_cselect_b32 s2, s0, -1
+; GFX9-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_cselect_b32 s3, s0, -1
+; GFX9-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX9-NEXT: s_cmp_gt_i32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_cmp_lt_i32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s1, s1, s3
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_ashr_i32 s0, s0, 8
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_lshl_b32 s0, s0, 8
+; GFX10-NEXT: s_lshl_b32 s1, s1, 8
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s2, s0, -1
+; GFX10-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: s_cselect_b32 s3, s0, -1
+; GFX10-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX10-NEXT: s_cmp_gt_i32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s3
+; GFX10-NEXT: s_cselect_b32 s1, s1, s3
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_ashr_i32 s0, s0, 8
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i24 @llvm.ssub.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
+
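Two i24 strategies are visible above. GFX6/GFX9/GFX10 reuse the left-justify trick (shift left by 8, saturate in i32, arithmetic shift right by 8). GFX8 instead goes through the subtract-with-overflow lowering: perform the plain subtract, detect signed overflow, and select a saturation constant derived from the wrapped result's sign. A rough IR equivalent of the GFX8 sequence, assuming nothing beyond the generic overflow intrinsic (the function name is illustrative):

define i24 @ssubsat_i24_overflow_sketch(i24 %lhs, i24 %rhs) {
  %pair = call { i24, i1 } @llvm.ssub.with.overflow.i24(i24 %lhs, i24 %rhs)
  %raw  = extractvalue { i24, i1 } %pair, 0
  %ov   = extractvalue { i24, i1 } %pair, 1
  ; on overflow, saturate away from the wrapped result's sign:
  ; raw >= 0 means the true result wrapped up from below INT24_MIN,
  ; raw <  0 means it wrapped down from above INT24_MAX
  %sign = ashr i24 %raw, 23
  %sat  = add i24 %sign, -8388608
  %res  = select i1 %ov, i24 %sat, i24 %raw
  ret i24 %res
}
declare { i24, i1 } @llvm.ssub.with.overflow.i24(i24, i24)

The s_ashr_i32 s1, s3, 23 / s_add_i32 s1, s1, 0xff800000 pair in the GFX8 checks is the %sign/%sat computation with the constants pre-sign-extended to 32 bits.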
+define i32 @v_ssubsat_i32(i32 %lhs, i32 %rhs) {
+; GFX6-LABEL: v_ssubsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX8-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, 0x7fffffff, v2
+; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, 0x80000000, v3
+; GFX8-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX9-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v2, 0x7fffffff, v2
+; GFX9-NEXT: v_subrev_u32_e32 v3, 0x80000000, v3
+; GFX9-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX10-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 0x7fffffff, v2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v3, 0x80000000, v3
+; GFX10-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i32 @llvm.ssub.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
+
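v_ssubsat_i32 shows the min/max expansion in its purest form: clamp rhs into the range of values that cannot make lhs - rhs wrap, then subtract. Sketched in IR with plain compare/select (illustrative only; the bounds match the 0x7fffffff/0x80000000 constants in the checks):

define i32 @ssubsat_i32_minmax_sketch(i32 %lhs, i32 %rhs) {
  ; lo = smax(lhs, -1) - INT_MAX: any rhs below lo would push lhs - rhs past INT_MAX
  %c0 = icmp sgt i32 %lhs, -1
  %m0 = select i1 %c0, i32 %lhs, i32 -1
  %lo = sub i32 %m0, 2147483647
  ; hi = smin(lhs, -1) - INT_MIN: any rhs above hi would push lhs - rhs below INT_MIN
  %c1 = icmp slt i32 %lhs, -1
  %m1 = select i1 %c1, i32 %lhs, i32 -1
  %hi = sub i32 %m1, -2147483648
  ; result = lhs - clamp(rhs, lo, hi)
  %c2 = icmp sgt i32 %lo, %rhs
  %t0 = select i1 %c2, i32 %lo, i32 %rhs
  %c3 = icmp slt i32 %hi, %t0
  %t1 = select i1 %c3, i32 %hi, i32 %t0
  %res = sub i32 %lhs, %t1
  ret i32 %res
}

The clamp against -1 keeps the bound computation itself from overflowing: when lhs < -1 no representable rhs can overflow the subtraction and lo degenerates to INT_MIN, and symmetrically for hi. The s_cmp/s_cselect pairs in the scalar variants below are the same smax/smin operations spelled out on the SALU.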
+define amdgpu_ps i32 @s_ssubsat_i32(i32 inreg %lhs, i32 inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s3, s0, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX6-NEXT: s_cmp_gt_i32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s3
+; GFX6-NEXT: s_cselect_b32 s1, s1, s3
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, -1
+; GFX8-NEXT: s_cselect_b32 s2, s0, -1
+; GFX8-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX8-NEXT: s_cmp_lt_i32 s0, -1
+; GFX8-NEXT: s_cselect_b32 s3, s0, -1
+; GFX8-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX8-NEXT: s_cmp_gt_i32 s2, s1
+; GFX8-NEXT: s_cselect_b32 s1, s2, s1
+; GFX8-NEXT: s_cmp_lt_i32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_cselect_b32 s2, s0, -1
+; GFX9-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_cselect_b32 s3, s0, -1
+; GFX9-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX9-NEXT: s_cmp_gt_i32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_cmp_lt_i32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s1, s1, s3
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s2, s0, -1
+; GFX10-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: s_cselect_b32 s3, s0, -1
+; GFX10-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX10-NEXT: s_cmp_gt_i32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s3
+; GFX10-NEXT: s_cselect_b32 s1, s1, s3
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.ssub.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
+
+define amdgpu_ps float @ssubsat_i32_sv(i32 inreg %lhs, i32 %rhs) {
+; GFX6-LABEL: ssubsat_i32_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s1, s0, -1
+; GFX6-NEXT: s_sub_i32 s1, s1, 0x7fffffff
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: s_sub_i32 s2, s2, 0x80000000
+; GFX6-NEXT: v_max_i32_e32 v0, s1, v0
+; GFX6-NEXT: v_min_i32_e32 v0, s2, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i32_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, -1
+; GFX8-NEXT: s_cselect_b32 s1, s0, -1
+; GFX8-NEXT: s_sub_i32 s1, s1, 0x7fffffff
+; GFX8-NEXT: s_cmp_lt_i32 s0, -1
+; GFX8-NEXT: s_cselect_b32 s2, s0, -1
+; GFX8-NEXT: s_sub_i32 s2, s2, 0x80000000
+; GFX8-NEXT: v_max_i32_e32 v0, s1, v0
+; GFX8-NEXT: v_min_i32_e32 v0, s2, v0
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i32_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_cselect_b32 s1, s0, -1
+; GFX9-NEXT: s_sub_i32 s1, s1, 0x7fffffff
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_cselect_b32 s2, s0, -1
+; GFX9-NEXT: s_sub_i32 s2, s2, 0x80000000
+; GFX9-NEXT: v_max_i32_e32 v0, s1, v0
+; GFX9-NEXT: v_min_i32_e32 v0, s2, v0
+; GFX9-NEXT: v_sub_u32_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i32_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s1, s0, -1
+; GFX10-NEXT: s_sub_i32 s1, s1, 0x7fffffff
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: v_max_i32_e32 v0, s1, v0
+; GFX10-NEXT: s_cselect_b32 s1, s0, -1
+; GFX10-NEXT: s_sub_i32 s1, s1, 0x80000000
+; GFX10-NEXT: v_min_i32_e32 v0, s1, v0
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.ssub.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @ssubsat_i32_vs(i32 %lhs, i32 inreg %rhs) {
+; GFX6-LABEL: ssubsat_i32_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_max_i32_e32 v1, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v1, vcc, 0x7fffffff, v1
+; GFX6-NEXT: v_min_i32_e32 v2, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 0x80000000, v2
+; GFX6-NEXT: v_max_i32_e32 v1, s0, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i32_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_max_i32_e32 v1, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v1, vcc, 0x7fffffff, v1
+; GFX8-NEXT: v_min_i32_e32 v2, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, 0x80000000, v2
+; GFX8-NEXT: v_max_i32_e32 v1, s0, v1
+; GFX8-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i32_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_max_i32_e32 v1, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v1, 0x7fffffff, v1
+; GFX9-NEXT: v_min_i32_e32 v2, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v2, 0x80000000, v2
+; GFX9-NEXT: v_max_i32_e32 v1, s0, v1
+; GFX9-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i32_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_max_i32_e32 v1, -1, v0
+; GFX10-NEXT: v_min_i32_e32 v2, -1, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_subrev_nc_u32_e32 v1, 0x7fffffff, v1
+; GFX10-NEXT: v_subrev_nc_u32_e32 v2, 0x80000000, v2
+; GFX10-NEXT: v_max_i32_e32 v1, s0, v1
+; GFX10-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.ssub.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
+
+define <2 x i32> @v_ssubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; GFX6-LABEL: v_ssubsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v4, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v5, -1, v0
+; GFX6-NEXT: v_max_i32_e32 v2, v4, v2
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v5
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s4, v2
+; GFX6-NEXT: v_min_i32_e32 v4, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_max_i32_e32 v2, v2, v3
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v4, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s4, v4
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v5, -1, v0
+; GFX8-NEXT: v_max_i32_e32 v2, v4, v2
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s5, v5
+; GFX8-NEXT: v_min_i32_e32 v2, v2, v5
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_max_i32_e32 v2, -1, v1
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s4, v2
+; GFX8-NEXT: v_min_i32_e32 v4, -1, v1
+; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s5, v4
+; GFX8-NEXT: v_max_i32_e32 v2, v2, v3
+; GFX8-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v4, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v4, s4, v4
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v5, -1, v0
+; GFX9-NEXT: v_max_i32_e32 v2, v4, v2
+; GFX9-NEXT: v_subrev_u32_e32 v5, s5, v5
+; GFX9-NEXT: v_min_i32_e32 v2, v2, v5
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_max_i32_e32 v2, -1, v1
+; GFX9-NEXT: v_subrev_u32_e32 v2, s4, v2
+; GFX9-NEXT: v_min_i32_e32 v4, -1, v1
+; GFX9-NEXT: v_subrev_u32_e32 v4, s5, v4
+; GFX9-NEXT: v_max_i32_e32 v2, v2, v3
+; GFX9-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_i32_e32 v4, -1, v0
+; GFX10-NEXT: v_max_i32_e32 v5, -1, v1
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: v_min_i32_e32 v6, -1, v0
+; GFX10-NEXT: v_min_i32_e32 v7, -1, v1
+; GFX10-NEXT: v_subrev_nc_u32_e32 v4, s4, v4
+; GFX10-NEXT: v_subrev_nc_u32_e32 v5, s4, v5
+; GFX10-NEXT: s_mov_b32 s4, 0x80000000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s4, v6
+; GFX10-NEXT: v_max_i32_e32 v11, v4, v2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s4, v7
+; GFX10-NEXT: v_max_i32_e32 v10, v5, v3
+; GFX10-NEXT: v_min_i32_e32 v2, v11, v6
+; GFX10-NEXT: v_min_i32_e32 v3, v10, v7
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
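Nothing vector-specific happens for i32 elements: <2 x i32> (and the v3/v4 cases that follow) simply scalarizes, repeating the clamp-and-subtract per lane with the shared bound constants kept in SGPRs. An illustrative IR equivalent (names are not from the commit):

define <2 x i32> @ssubsat_v2i32_scalarized_sketch(<2 x i32> %lhs, <2 x i32> %rhs) {
  %l0 = extractelement <2 x i32> %lhs, i32 0
  %r0 = extractelement <2 x i32> %rhs, i32 0
  %l1 = extractelement <2 x i32> %lhs, i32 1
  %r1 = extractelement <2 x i32> %rhs, i32 1
  ; each lane is an independent scalar saturating subtract
  %s0 = call i32 @llvm.ssub.sat.i32(i32 %l0, i32 %r0)
  %s1 = call i32 @llvm.ssub.sat.i32(i32 %l1, i32 %r1)
  %v0 = insertelement <2 x i32> undef, i32 %s0, i32 0
  %v1 = insertelement <2 x i32> %v0, i32 %s1, i32 1
  ret <2 x i32> %v1
}
declare i32 @llvm.ssub.sat.i32(i32, i32)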
+define amdgpu_ps <2 x i32> @s_ssubsat_v2i32(<2 x i32> inreg %lhs, <2 x i32> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: s_cselect_b32 s6, s0, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s4
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s7, s0, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s5
+; GFX6-NEXT: s_cmp_gt_i32 s6, s2
+; GFX6-NEXT: s_cselect_b32 s2, s6, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s7
+; GFX6-NEXT: s_cselect_b32 s2, s2, s7
+; GFX6-NEXT: s_sub_i32 s0, s0, s2
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s2, s1, -1
+; GFX6-NEXT: s_sub_i32 s2, s2, s4
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s4, s1, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s5
+; GFX6-NEXT: s_cmp_gt_i32 s2, s3
+; GFX6-NEXT: s_cselect_b32 s2, s2, s3
+; GFX6-NEXT: s_cmp_lt_i32 s2, s4
+; GFX6-NEXT: s_cselect_b32 s2, s2, s4
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, -1
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: s_cselect_b32 s6, s0, -1
+; GFX8-NEXT: s_sub_i32 s6, s6, s4
+; GFX8-NEXT: s_cmp_lt_i32 s0, -1
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s7, s0, -1
+; GFX8-NEXT: s_sub_i32 s7, s7, s5
+; GFX8-NEXT: s_cmp_gt_i32 s6, s2
+; GFX8-NEXT: s_cselect_b32 s2, s6, s2
+; GFX8-NEXT: s_cmp_lt_i32 s2, s7
+; GFX8-NEXT: s_cselect_b32 s2, s2, s7
+; GFX8-NEXT: s_sub_i32 s0, s0, s2
+; GFX8-NEXT: s_cmp_gt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s2, s1, -1
+; GFX8-NEXT: s_sub_i32 s2, s2, s4
+; GFX8-NEXT: s_cmp_lt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s4, s1, -1
+; GFX8-NEXT: s_sub_i32 s4, s4, s5
+; GFX8-NEXT: s_cmp_gt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_cmp_lt_i32 s2, s4
+; GFX8-NEXT: s_cselect_b32 s2, s2, s4
+; GFX8-NEXT: s_sub_i32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: s_cselect_b32 s6, s0, -1
+; GFX9-NEXT: s_sub_i32 s6, s6, s4
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s7, s0, -1
+; GFX9-NEXT: s_sub_i32 s7, s7, s5
+; GFX9-NEXT: s_cmp_gt_i32 s6, s2
+; GFX9-NEXT: s_cselect_b32 s2, s6, s2
+; GFX9-NEXT: s_cmp_lt_i32 s2, s7
+; GFX9-NEXT: s_cselect_b32 s2, s2, s7
+; GFX9-NEXT: s_sub_i32 s0, s0, s2
+; GFX9-NEXT: s_cmp_gt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s2, s1, -1
+; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_cmp_lt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s4, s1, -1
+; GFX9-NEXT: s_sub_i32 s4, s4, s5
+; GFX9-NEXT: s_cmp_gt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_cmp_lt_i32 s2, s4
+; GFX9-NEXT: s_cselect_b32 s2, s2, s4
+; GFX9-NEXT: s_sub_i32 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: s_cselect_b32 s5, s0, -1
+; GFX10-NEXT: s_mov_b32 s6, 0x80000000
+; GFX10-NEXT: s_sub_i32 s5, s5, s4
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s7, s0, -1
+; GFX10-NEXT: s_sub_i32 s7, s7, s6
+; GFX10-NEXT: s_cmp_gt_i32 s5, s2
+; GFX10-NEXT: s_cselect_b32 s2, s5, s2
+; GFX10-NEXT: s_cmp_lt_i32 s2, s7
+; GFX10-NEXT: s_cselect_b32 s2, s2, s7
+; GFX10-NEXT: s_sub_i32 s0, s0, s2
+; GFX10-NEXT: s_cmp_gt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s2, s1, -1
+; GFX10-NEXT: s_sub_i32 s2, s2, s4
+; GFX10-NEXT: s_cmp_lt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s4, s1, -1
+; GFX10-NEXT: s_sub_i32 s4, s4, s6
+; GFX10-NEXT: s_cmp_gt_i32 s2, s3
+; GFX10-NEXT: s_cselect_b32 s2, s2, s3
+; GFX10-NEXT: s_cmp_lt_i32 s2, s4
+; GFX10-NEXT: s_cselect_b32 s2, s2, s4
+; GFX10-NEXT: s_sub_i32 s1, s1, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @v_ssubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
+; GFX6-LABEL: v_ssubsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v6, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v6, vcc, s4, v6
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v7, -1, v0
+; GFX6-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX6-NEXT: v_subrev_i32_e32 v7, vcc, s5, v7
+; GFX6-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT: v_max_i32_e32 v3, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v1
+; GFX6-NEXT: v_max_i32_e32 v3, v3, v4
+; GFX6-NEXT: v_subrev_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT: v_max_i32_e32 v3, -1, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_min_i32_e32 v4, -1, v2
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_max_i32_e32 v3, v3, v5
+; GFX6-NEXT: v_min_i32_e32 v3, v3, v4
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v6, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s4, v6
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v7, -1, v0
+; GFX8-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX8-NEXT: v_subrev_u32_e32 v7, vcc, s5, v7
+; GFX8-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v3
+; GFX8-NEXT: v_max_i32_e32 v3, -1, v1
+; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s4, v3
+; GFX8-NEXT: v_min_i32_e32 v6, -1, v1
+; GFX8-NEXT: v_max_i32_e32 v3, v3, v4
+; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s5, v6
+; GFX8-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT: v_max_i32_e32 v3, -1, v2
+; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s4, v3
+; GFX8-NEXT: v_min_i32_e32 v4, -1, v2
+; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s5, v4
+; GFX8-NEXT: v_max_i32_e32 v3, v3, v5
+; GFX8-NEXT: v_min_i32_e32 v3, v3, v4
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v6, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v6, s4, v6
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v7, -1, v0
+; GFX9-NEXT: v_max_i32_e32 v3, v6, v3
+; GFX9-NEXT: v_subrev_u32_e32 v7, s5, v7
+; GFX9-NEXT: v_min_i32_e32 v3, v3, v7
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v3
+; GFX9-NEXT: v_max_i32_e32 v3, -1, v1
+; GFX9-NEXT: v_subrev_u32_e32 v3, s4, v3
+; GFX9-NEXT: v_min_i32_e32 v6, -1, v1
+; GFX9-NEXT: v_max_i32_e32 v3, v3, v4
+; GFX9-NEXT: v_subrev_u32_e32 v6, s5, v6
+; GFX9-NEXT: v_min_i32_e32 v3, v3, v6
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_max_i32_e32 v3, -1, v2
+; GFX9-NEXT: v_subrev_u32_e32 v3, s4, v3
+; GFX9-NEXT: v_min_i32_e32 v4, -1, v2
+; GFX9-NEXT: v_subrev_u32_e32 v4, s5, v4
+; GFX9-NEXT: v_max_i32_e32 v3, v3, v5
+; GFX9-NEXT: v_min_i32_e32 v3, v3, v4
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_i32_e32 v6, -1, v0
+; GFX10-NEXT: v_max_i32_e32 v8, -1, v1
+; GFX10-NEXT: v_max_i32_e32 v9, -1, v2
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: v_min_i32_e32 v7, -1, v0
+; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s4, v6
+; GFX10-NEXT: v_subrev_nc_u32_e32 v15, s4, v8
+; GFX10-NEXT: v_subrev_nc_u32_e32 v19, s4, v9
+; GFX10-NEXT: v_min_i32_e32 v10, -1, v1
+; GFX10-NEXT: v_min_i32_e32 v11, -1, v2
+; GFX10-NEXT: s_mov_b32 s5, 0x80000000
+; GFX10-NEXT: v_max_i32_e32 v14, v6, v3
+; GFX10-NEXT: v_subrev_nc_u32_e32 v7, s5, v7
+; GFX10-NEXT: v_subrev_nc_u32_e32 v6, s5, v10
+; GFX10-NEXT: v_max_i32_e32 v4, v15, v4
+; GFX10-NEXT: v_subrev_nc_u32_e32 v8, s5, v11
+; GFX10-NEXT: v_max_i32_e32 v5, v19, v5
+; GFX10-NEXT: v_min_i32_e32 v3, v14, v7
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i32_e32 v4, v4, v6
+; GFX10-NEXT: v_min_i32_e32 v5, v5, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v3
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <3 x i32> @llvm.ssub.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define amdgpu_ps <3 x i32> @s_ssubsat_v3i32(<3 x i32> inreg %lhs, <3 x i32> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s6, -2
+; GFX6-NEXT: s_cselect_b32 s8, s0, -1
+; GFX6-NEXT: s_sub_i32 s8, s8, s6
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s7, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s9, s0, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s7
+; GFX6-NEXT: s_cmp_gt_i32 s8, s3
+; GFX6-NEXT: s_cselect_b32 s3, s8, s3
+; GFX6-NEXT: s_cmp_lt_i32 s3, s9
+; GFX6-NEXT: s_cselect_b32 s3, s3, s9
+; GFX6-NEXT: s_sub_i32 s0, s0, s3
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s3, s1, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, s6
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s8, s1, -1
+; GFX6-NEXT: s_sub_i32 s8, s8, s7
+; GFX6-NEXT: s_cmp_gt_i32 s3, s4
+; GFX6-NEXT: s_cselect_b32 s3, s3, s4
+; GFX6-NEXT: s_cmp_lt_i32 s3, s8
+; GFX6-NEXT: s_cselect_b32 s3, s3, s8
+; GFX6-NEXT: s_sub_i32 s1, s1, s3
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s3, s2, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, s6
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s4, s2, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s7
+; GFX6-NEXT: s_cmp_gt_i32 s3, s5
+; GFX6-NEXT: s_cselect_b32 s3, s3, s5
+; GFX6-NEXT: s_cmp_lt_i32 s3, s4
+; GFX6-NEXT: s_cselect_b32 s3, s3, s4
+; GFX6-NEXT: s_sub_i32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, -1
+; GFX8-NEXT: s_brev_b32 s6, -2
+; GFX8-NEXT: s_cselect_b32 s8, s0, -1
+; GFX8-NEXT: s_sub_i32 s8, s8, s6
+; GFX8-NEXT: s_cmp_lt_i32 s0, -1
+; GFX8-NEXT: s_mov_b32 s7, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s9, s0, -1
+; GFX8-NEXT: s_sub_i32 s9, s9, s7
+; GFX8-NEXT: s_cmp_gt_i32 s8, s3
+; GFX8-NEXT: s_cselect_b32 s3, s8, s3
+; GFX8-NEXT: s_cmp_lt_i32 s3, s9
+; GFX8-NEXT: s_cselect_b32 s3, s3, s9
+; GFX8-NEXT: s_sub_i32 s0, s0, s3
+; GFX8-NEXT: s_cmp_gt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s3, s1, -1
+; GFX8-NEXT: s_sub_i32 s3, s3, s6
+; GFX8-NEXT: s_cmp_lt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s8, s1, -1
+; GFX8-NEXT: s_sub_i32 s8, s8, s7
+; GFX8-NEXT: s_cmp_gt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s8
+; GFX8-NEXT: s_cselect_b32 s3, s3, s8
+; GFX8-NEXT: s_sub_i32 s1, s1, s3
+; GFX8-NEXT: s_cmp_gt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s3, s2, -1
+; GFX8-NEXT: s_sub_i32 s3, s3, s6
+; GFX8-NEXT: s_cmp_lt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s4, s2, -1
+; GFX8-NEXT: s_sub_i32 s4, s4, s7
+; GFX8-NEXT: s_cmp_gt_i32 s3, s5
+; GFX8-NEXT: s_cselect_b32 s3, s3, s5
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_sub_i32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_brev_b32 s6, -2
+; GFX9-NEXT: s_cselect_b32 s8, s0, -1
+; GFX9-NEXT: s_sub_i32 s8, s8, s6
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_mov_b32 s7, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s9, s0, -1
+; GFX9-NEXT: s_sub_i32 s9, s9, s7
+; GFX9-NEXT: s_cmp_gt_i32 s8, s3
+; GFX9-NEXT: s_cselect_b32 s3, s8, s3
+; GFX9-NEXT: s_cmp_lt_i32 s3, s9
+; GFX9-NEXT: s_cselect_b32 s3, s3, s9
+; GFX9-NEXT: s_sub_i32 s0, s0, s3
+; GFX9-NEXT: s_cmp_gt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s3, s1, -1
+; GFX9-NEXT: s_sub_i32 s3, s3, s6
+; GFX9-NEXT: s_cmp_lt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s8, s1, -1
+; GFX9-NEXT: s_sub_i32 s8, s8, s7
+; GFX9-NEXT: s_cmp_gt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_cmp_lt_i32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s3, s3, s8
+; GFX9-NEXT: s_sub_i32 s1, s1, s3
+; GFX9-NEXT: s_cmp_gt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s3, s2, -1
+; GFX9-NEXT: s_sub_i32 s3, s3, s6
+; GFX9-NEXT: s_cmp_lt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s4, s2, -1
+; GFX9-NEXT: s_sub_i32 s4, s4, s7
+; GFX9-NEXT: s_cmp_gt_i32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s3, s3, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: s_brev_b32 s6, -2
+; GFX10-NEXT: s_cselect_b32 s7, s0, -1
+; GFX10-NEXT: s_mov_b32 s8, 0x80000000
+; GFX10-NEXT: s_sub_i32 s7, s7, s6
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s9, s0, -1
+; GFX10-NEXT: s_sub_i32 s9, s9, s8
+; GFX10-NEXT: s_cmp_gt_i32 s7, s3
+; GFX10-NEXT: s_cselect_b32 s3, s7, s3
+; GFX10-NEXT: s_cmp_lt_i32 s3, s9
+; GFX10-NEXT: s_cselect_b32 s3, s3, s9
+; GFX10-NEXT: s_sub_i32 s0, s0, s3
+; GFX10-NEXT: s_cmp_gt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s3, s1, -1
+; GFX10-NEXT: s_sub_i32 s3, s3, s6
+; GFX10-NEXT: s_cmp_lt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s7, s1, -1
+; GFX10-NEXT: s_sub_i32 s7, s7, s8
+; GFX10-NEXT: s_cmp_gt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_cmp_lt_i32 s3, s7
+; GFX10-NEXT: s_cselect_b32 s3, s3, s7
+; GFX10-NEXT: s_sub_i32 s1, s1, s3
+; GFX10-NEXT: s_cmp_gt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s3, s2, -1
+; GFX10-NEXT: s_sub_i32 s3, s3, s6
+; GFX10-NEXT: s_cmp_lt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s4, s2, -1
+; GFX10-NEXT: s_sub_i32 s4, s4, s8
+; GFX10-NEXT: s_cmp_gt_i32 s3, s5
+; GFX10-NEXT: s_cselect_b32 s3, s3, s5
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_sub_i32 s2, s2, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <3 x i32> @llvm.ssub.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @v_ssubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; GFX6-LABEL: v_ssubsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v8, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, s4, v8
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v9, -1, v0
+; GFX6-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX6-NEXT: v_subrev_i32_e32 v9, vcc, s5, v9
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v9
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_max_i32_e32 v4, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: v_min_i32_e32 v8, -1, v1
+; GFX6-NEXT: v_max_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_max_i32_e32 v4, -1, v2
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: v_min_i32_e32 v5, -1, v2
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: v_max_i32_e32 v4, v4, v6
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_max_i32_e32 v4, -1, v3
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, 0x7fffffff, v4
+; GFX6-NEXT: v_min_i32_e32 v5, -1, v3
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, 0x80000000, v5
+; GFX6-NEXT: v_max_i32_e32 v4, v4, v7
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v8, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v8, vcc, s4, v8
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v9, -1, v0
+; GFX8-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX8-NEXT: v_subrev_u32_e32 v9, vcc, s5, v9
+; GFX8-NEXT: v_min_i32_e32 v4, v4, v9
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_max_i32_e32 v4, -1, v1
+; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s4, v4
+; GFX8-NEXT: v_min_i32_e32 v8, -1, v1
+; GFX8-NEXT: v_max_i32_e32 v4, v4, v5
+; GFX8-NEXT: v_subrev_u32_e32 v8, vcc, s5, v8
+; GFX8-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v4
+; GFX8-NEXT: v_max_i32_e32 v4, -1, v2
+; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s4, v4
+; GFX8-NEXT: v_min_i32_e32 v5, -1, v2
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s5, v5
+; GFX8-NEXT: v_max_i32_e32 v4, v4, v6
+; GFX8-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v4
+; GFX8-NEXT: v_max_i32_e32 v4, -1, v3
+; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, 0x7fffffff, v4
+; GFX8-NEXT: v_min_i32_e32 v5, -1, v3
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, 0x80000000, v5
+; GFX8-NEXT: v_max_i32_e32 v4, v4, v7
+; GFX8-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v8, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v8, s4, v8
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v9, -1, v0
+; GFX9-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX9-NEXT: v_subrev_u32_e32 v9, s5, v9
+; GFX9-NEXT: v_min_i32_e32 v4, v4, v9
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_max_i32_e32 v4, -1, v1
+; GFX9-NEXT: v_subrev_u32_e32 v4, s4, v4
+; GFX9-NEXT: v_min_i32_e32 v8, -1, v1
+; GFX9-NEXT: v_max_i32_e32 v4, v4, v5
+; GFX9-NEXT: v_subrev_u32_e32 v8, s5, v8
+; GFX9-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v4
+; GFX9-NEXT: v_max_i32_e32 v4, -1, v2
+; GFX9-NEXT: v_subrev_u32_e32 v4, s4, v4
+; GFX9-NEXT: v_min_i32_e32 v5, -1, v2
+; GFX9-NEXT: v_subrev_u32_e32 v5, s5, v5
+; GFX9-NEXT: v_max_i32_e32 v4, v4, v6
+; GFX9-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v4
+; GFX9-NEXT: v_max_i32_e32 v4, -1, v3
+; GFX9-NEXT: v_subrev_u32_e32 v4, 0x7fffffff, v4
+; GFX9-NEXT: v_min_i32_e32 v5, -1, v3
+; GFX9-NEXT: v_subrev_u32_e32 v5, 0x80000000, v5
+; GFX9-NEXT: v_max_i32_e32 v4, v4, v7
+; GFX9-NEXT: v_min_i32_e32 v4, v4, v5
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_i32_e32 v8, -1, v0
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: v_max_i32_e32 v10, -1, v1
+; GFX10-NEXT: v_max_i32_e32 v12, -1, v3
+; GFX10-NEXT: v_min_i32_e32 v9, -1, v0
+; GFX10-NEXT: v_subrev_nc_u32_e32 v15, s4, v8
+; GFX10-NEXT: v_max_i32_e32 v8, -1, v2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v10, s4, v10
+; GFX10-NEXT: v_subrev_nc_u32_e32 v12, 0x7fffffff, v12
+; GFX10-NEXT: v_min_i32_e32 v11, -1, v1
+; GFX10-NEXT: v_min_i32_e32 v13, -1, v2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v8, s4, v8
+; GFX10-NEXT: v_min_i32_e32 v14, -1, v3
+; GFX10-NEXT: s_mov_b32 s5, 0x80000000
+; GFX10-NEXT: v_max_i32_e32 v4, v15, v4
+; GFX10-NEXT: v_subrev_nc_u32_e32 v9, s5, v9
+; GFX10-NEXT: v_max_i32_e32 v5, v10, v5
+; GFX10-NEXT: v_subrev_nc_u32_e32 v11, s5, v11
+; GFX10-NEXT: v_max_i32_e32 v15, v8, v6
+; GFX10-NEXT: v_subrev_nc_u32_e32 v10, s5, v13
+; GFX10-NEXT: v_subrev_nc_u32_e32 v8, 0x80000000, v14
+; GFX10-NEXT: v_max_i32_e32 v7, v12, v7
+; GFX10-NEXT: v_min_i32_e32 v19, v4, v9
+; GFX10-NEXT: v_min_i32_e32 v11, v5, v11
+; GFX10-NEXT: v_min_i32_e32 v15, v15, v10
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_i32_e32 v6, v7, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v19
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v11
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v15
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
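+
+; The VALU expansion above is the min/max lowering named in the commit log,
+; applied once per vector element. With s4 = s_brev_b32 -2 = 0x7fffffff
+; (INT_MAX) and s5 = 0x80000000 (INT_MIN), each lane computes
+;   lo = smax(lhs, -1) - 0x7fffffff
+;   hi = smin(lhs, -1) - 0x80000000
+;   result = lhs - smin(smax(lo, rhs), hi)
+; i.e. rhs is clamped to the largest range the subtraction can absorb without
+; overflowing. Worked example: lhs = INT_MIN, rhs = 1 gives
+; lo = -1 - INT_MAX = INT_MIN and hi = INT_MIN - INT_MIN = 0, so the clamped
+; rhs is 0 and the result stays saturated at INT_MIN, as required.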
+
+define amdgpu_ps <4 x i32> @s_ssubsat_v4i32(<4 x i32> inreg %lhs, <4 x i32> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s8, -2
+; GFX6-NEXT: s_cselect_b32 s10, s0, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s9, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s11, s0, -1
+; GFX6-NEXT: s_sub_i32 s11, s11, s9
+; GFX6-NEXT: s_cmp_gt_i32 s10, s4
+; GFX6-NEXT: s_cselect_b32 s4, s10, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s11
+; GFX6-NEXT: s_cselect_b32 s4, s4, s11
+; GFX6-NEXT: s_sub_i32 s0, s0, s4
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s4, s1, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s8
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s10, s1, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s9
+; GFX6-NEXT: s_cmp_gt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_cmp_lt_i32 s4, s10
+; GFX6-NEXT: s_cselect_b32 s4, s4, s10
+; GFX6-NEXT: s_sub_i32 s1, s1, s4
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s4, s2, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s8
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s5, s2, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s9
+; GFX6-NEXT: s_cmp_gt_i32 s4, s6
+; GFX6-NEXT: s_cselect_b32 s4, s4, s6
+; GFX6-NEXT: s_cmp_lt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_sub_i32 s2, s2, s4
+; GFX6-NEXT: s_cmp_gt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s4, s3, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s8
+; GFX6-NEXT: s_cmp_lt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s5, s3, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s9
+; GFX6-NEXT: s_cmp_gt_i32 s4, s7
+; GFX6-NEXT: s_cselect_b32 s4, s4, s7
+; GFX6-NEXT: s_cmp_lt_i32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, -1
+; GFX8-NEXT: s_brev_b32 s8, -2
+; GFX8-NEXT: s_cselect_b32 s10, s0, -1
+; GFX8-NEXT: s_sub_i32 s10, s10, s8
+; GFX8-NEXT: s_cmp_lt_i32 s0, -1
+; GFX8-NEXT: s_mov_b32 s9, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s11, s0, -1
+; GFX8-NEXT: s_sub_i32 s11, s11, s9
+; GFX8-NEXT: s_cmp_gt_i32 s10, s4
+; GFX8-NEXT: s_cselect_b32 s4, s10, s4
+; GFX8-NEXT: s_cmp_lt_i32 s4, s11
+; GFX8-NEXT: s_cselect_b32 s4, s4, s11
+; GFX8-NEXT: s_sub_i32 s0, s0, s4
+; GFX8-NEXT: s_cmp_gt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s4, s1, -1
+; GFX8-NEXT: s_sub_i32 s4, s4, s8
+; GFX8-NEXT: s_cmp_lt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s10, s1, -1
+; GFX8-NEXT: s_sub_i32 s10, s10, s9
+; GFX8-NEXT: s_cmp_gt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_cmp_lt_i32 s4, s10
+; GFX8-NEXT: s_cselect_b32 s4, s4, s10
+; GFX8-NEXT: s_sub_i32 s1, s1, s4
+; GFX8-NEXT: s_cmp_gt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s4, s2, -1
+; GFX8-NEXT: s_sub_i32 s4, s4, s8
+; GFX8-NEXT: s_cmp_lt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s5, s2, -1
+; GFX8-NEXT: s_sub_i32 s5, s5, s9
+; GFX8-NEXT: s_cmp_gt_i32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_sub_i32 s2, s2, s4
+; GFX8-NEXT: s_cmp_gt_i32 s3, -1
+; GFX8-NEXT: s_cselect_b32 s4, s3, -1
+; GFX8-NEXT: s_sub_i32 s4, s4, s8
+; GFX8-NEXT: s_cmp_lt_i32 s3, -1
+; GFX8-NEXT: s_cselect_b32 s5, s3, -1
+; GFX8-NEXT: s_sub_i32 s5, s5, s9
+; GFX8-NEXT: s_cmp_gt_i32 s4, s7
+; GFX8-NEXT: s_cselect_b32 s4, s4, s7
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_sub_i32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_brev_b32 s8, -2
+; GFX9-NEXT: s_cselect_b32 s10, s0, -1
+; GFX9-NEXT: s_sub_i32 s10, s10, s8
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_mov_b32 s9, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s11, s0, -1
+; GFX9-NEXT: s_sub_i32 s11, s11, s9
+; GFX9-NEXT: s_cmp_gt_i32 s10, s4
+; GFX9-NEXT: s_cselect_b32 s4, s10, s4
+; GFX9-NEXT: s_cmp_lt_i32 s4, s11
+; GFX9-NEXT: s_cselect_b32 s4, s4, s11
+; GFX9-NEXT: s_sub_i32 s0, s0, s4
+; GFX9-NEXT: s_cmp_gt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s4, s1, -1
+; GFX9-NEXT: s_sub_i32 s4, s4, s8
+; GFX9-NEXT: s_cmp_lt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s10, s1, -1
+; GFX9-NEXT: s_sub_i32 s10, s10, s9
+; GFX9-NEXT: s_cmp_gt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_lt_i32 s4, s10
+; GFX9-NEXT: s_cselect_b32 s4, s4, s10
+; GFX9-NEXT: s_sub_i32 s1, s1, s4
+; GFX9-NEXT: s_cmp_gt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s4, s2, -1
+; GFX9-NEXT: s_sub_i32 s4, s4, s8
+; GFX9-NEXT: s_cmp_lt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s5, s2, -1
+; GFX9-NEXT: s_sub_i32 s5, s5, s9
+; GFX9-NEXT: s_cmp_gt_i32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_cmp_lt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_cmp_gt_i32 s3, -1
+; GFX9-NEXT: s_cselect_b32 s4, s3, -1
+; GFX9-NEXT: s_sub_i32 s4, s4, s8
+; GFX9-NEXT: s_cmp_lt_i32 s3, -1
+; GFX9-NEXT: s_cselect_b32 s5, s3, -1
+; GFX9-NEXT: s_sub_i32 s5, s5, s9
+; GFX9-NEXT: s_cmp_gt_i32 s4, s7
+; GFX9-NEXT: s_cselect_b32 s4, s4, s7
+; GFX9-NEXT: s_cmp_lt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_sub_i32 s3, s3, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: s_brev_b32 s8, -2
+; GFX10-NEXT: s_cselect_b32 s9, s0, -1
+; GFX10-NEXT: s_mov_b32 s10, 0x80000000
+; GFX10-NEXT: s_sub_i32 s9, s9, s8
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s11, s0, -1
+; GFX10-NEXT: s_sub_i32 s11, s11, s10
+; GFX10-NEXT: s_cmp_gt_i32 s9, s4
+; GFX10-NEXT: s_cselect_b32 s4, s9, s4
+; GFX10-NEXT: s_cmp_lt_i32 s4, s11
+; GFX10-NEXT: s_cselect_b32 s4, s4, s11
+; GFX10-NEXT: s_sub_i32 s0, s0, s4
+; GFX10-NEXT: s_cmp_gt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s4, s1, -1
+; GFX10-NEXT: s_sub_i32 s4, s4, s8
+; GFX10-NEXT: s_cmp_lt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s9, s1, -1
+; GFX10-NEXT: s_sub_i32 s9, s9, s10
+; GFX10-NEXT: s_cmp_gt_i32 s4, s5
+; GFX10-NEXT: s_cselect_b32 s4, s4, s5
+; GFX10-NEXT: s_cmp_lt_i32 s4, s9
+; GFX10-NEXT: s_cselect_b32 s4, s4, s9
+; GFX10-NEXT: s_sub_i32 s1, s1, s4
+; GFX10-NEXT: s_cmp_gt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s4, s2, -1
+; GFX10-NEXT: s_sub_i32 s4, s4, s8
+; GFX10-NEXT: s_cmp_lt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s5, s2, -1
+; GFX10-NEXT: s_sub_i32 s5, s5, s10
+; GFX10-NEXT: s_cmp_gt_i32 s4, s6
+; GFX10-NEXT: s_cselect_b32 s4, s4, s6
+; GFX10-NEXT: s_cmp_lt_i32 s4, s5
+; GFX10-NEXT: s_cselect_b32 s4, s4, s5
+; GFX10-NEXT: s_sub_i32 s2, s2, s4
+; GFX10-NEXT: s_cmp_gt_i32 s3, -1
+; GFX10-NEXT: s_cselect_b32 s4, s3, -1
+; GFX10-NEXT: s_sub_i32 s4, s4, s8
+; GFX10-NEXT: s_cmp_lt_i32 s3, -1
+; GFX10-NEXT: s_cselect_b32 s5, s3, -1
+; GFX10-NEXT: s_sub_i32 s5, s5, s10
+; GFX10-NEXT: s_cmp_gt_i32 s4, s7
+; GFX10-NEXT: s_cselect_b32 s4, s4, s7
+; GFX10-NEXT: s_cmp_lt_i32 s4, s5
+; GFX10-NEXT: s_cselect_b32 s4, s4, s5
+; GFX10-NEXT: s_sub_i32 s3, s3, s4
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
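+
+; The SALU expansion is the same per-element computation, but each smax/smin
+; is emitted as an s_cmp_{gt,lt}_i32 plus s_cselect_b32 pair rather than as
+; s_max_i32/s_min_i32 -- presumably because scalar min/max selection was not
+; yet wired up here, consistent with the log's note that SALU expansion is
+; not handled specially. That costs four compare/select pairs and three
+; subtracts per element.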
+
+define <5 x i32> @v_ssubsat_v5i32(<5 x i32> %lhs, <5 x i32> %rhs) {
+; GFX6-LABEL: v_ssubsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v10, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v10, vcc, s4, v10
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v12, -1, v0
+; GFX6-NEXT: v_max_i32_e32 v5, v10, v5
+; GFX6-NEXT: v_subrev_i32_e32 v12, vcc, s5, v12
+; GFX6-NEXT: v_min_i32_e32 v5, v5, v12
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v5
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v1
+; GFX6-NEXT: v_max_i32_e32 v5, v5, v6
+; GFX6-NEXT: v_subrev_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: v_min_i32_e32 v5, v5, v10
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v5
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v2
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v2
+; GFX6-NEXT: v_subrev_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX6-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
+; GFX6-NEXT: v_bfrev_b32_e32 v11, -2
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v3
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v11
+; GFX6-NEXT: v_mov_b32_e32 v13, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v3
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v13
+; GFX6-NEXT: v_max_i32_e32 v5, v5, v8
+; GFX6-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v5
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v4
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v11
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v4
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v13
+; GFX6-NEXT: v_max_i32_e32 v5, v5, v9
+; GFX6-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v5
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v10, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s4, v10
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v12, -1, v0
+; GFX8-NEXT: v_max_i32_e32 v5, v10, v5
+; GFX8-NEXT: v_subrev_u32_e32 v12, vcc, s5, v12
+; GFX8-NEXT: v_min_i32_e32 v5, v5, v12
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v5
+; GFX8-NEXT: v_max_i32_e32 v5, -1, v1
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s4, v5
+; GFX8-NEXT: v_min_i32_e32 v10, -1, v1
+; GFX8-NEXT: v_max_i32_e32 v5, v5, v6
+; GFX8-NEXT: v_subrev_u32_e32 v10, vcc, s5, v10
+; GFX8-NEXT: v_min_i32_e32 v5, v5, v10
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v5
+; GFX8-NEXT: v_max_i32_e32 v5, -1, v2
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s4, v5
+; GFX8-NEXT: v_min_i32_e32 v6, -1, v2
+; GFX8-NEXT: v_subrev_u32_e32 v6, vcc, s5, v6
+; GFX8-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX8-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v5
+; GFX8-NEXT: v_bfrev_b32_e32 v11, -2
+; GFX8-NEXT: v_max_i32_e32 v5, -1, v3
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, v5, v11
+; GFX8-NEXT: v_mov_b32_e32 v13, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v6, -1, v3
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, v6, v13
+; GFX8-NEXT: v_max_i32_e32 v5, v5, v8
+; GFX8-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, v3, v5
+; GFX8-NEXT: v_max_i32_e32 v5, -1, v4
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, v5, v11
+; GFX8-NEXT: v_min_i32_e32 v6, -1, v4
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, v6, v13
+; GFX8-NEXT: v_max_i32_e32 v5, v5, v9
+; GFX8-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v4, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v10, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v10, s4, v10
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v12, -1, v0
+; GFX9-NEXT: v_max_i32_e32 v5, v10, v5
+; GFX9-NEXT: v_subrev_u32_e32 v12, s5, v12
+; GFX9-NEXT: v_min_i32_e32 v5, v5, v12
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v5
+; GFX9-NEXT: v_max_i32_e32 v5, -1, v1
+; GFX9-NEXT: v_subrev_u32_e32 v5, s4, v5
+; GFX9-NEXT: v_min_i32_e32 v10, -1, v1
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v6
+; GFX9-NEXT: v_subrev_u32_e32 v10, s5, v10
+; GFX9-NEXT: v_min_i32_e32 v5, v5, v10
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_max_i32_e32 v5, -1, v2
+; GFX9-NEXT: v_subrev_u32_e32 v5, s4, v5
+; GFX9-NEXT: v_min_i32_e32 v6, -1, v2
+; GFX9-NEXT: v_subrev_u32_e32 v6, s5, v6
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v7
+; GFX9-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v5
+; GFX9-NEXT: v_bfrev_b32_e32 v11, -2
+; GFX9-NEXT: v_max_i32_e32 v5, -1, v3
+; GFX9-NEXT: v_sub_u32_e32 v5, v5, v11
+; GFX9-NEXT: v_mov_b32_e32 v13, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v6, -1, v3
+; GFX9-NEXT: v_sub_u32_e32 v6, v6, v13
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v8
+; GFX9-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v5
+; GFX9-NEXT: v_max_i32_e32 v5, -1, v4
+; GFX9-NEXT: v_sub_u32_e32 v5, v5, v11
+; GFX9-NEXT: v_min_i32_e32 v6, -1, v4
+; GFX9-NEXT: v_sub_u32_e32 v6, v6, v13
+; GFX9-NEXT: v_max_i32_e32 v5, v5, v9
+; GFX9-NEXT: v_min_i32_e32 v5, v5, v6
+; GFX9-NEXT: v_sub_u32_e32 v4, v4, v5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_i32_e32 v10, -1, v0
+; GFX10-NEXT: v_max_i32_e32 v13, -1, v1
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: v_bfrev_b32_e32 v11, -2
+; GFX10-NEXT: v_max_i32_e32 v17, -1, v4
+; GFX10-NEXT: v_subrev_nc_u32_e32 v10, s4, v10
+; GFX10-NEXT: v_subrev_nc_u32_e32 v13, s4, v13
+; GFX10-NEXT: v_min_i32_e32 v12, -1, v0
+; GFX10-NEXT: v_mov_b32_e32 v14, 0x80000000
+; GFX10-NEXT: v_min_i32_e32 v15, -1, v1
+; GFX10-NEXT: v_max_i32_e32 v5, v10, v5
+; GFX10-NEXT: v_max_i32_e32 v10, -1, v2
+; GFX10-NEXT: v_max_i32_e32 v6, v13, v6
+; GFX10-NEXT: v_max_i32_e32 v13, -1, v3
+; GFX10-NEXT: v_min_i32_e32 v16, -1, v2
+; GFX10-NEXT: v_min_i32_e32 v23, -1, v3
+; GFX10-NEXT: v_subrev_nc_u32_e32 v10, s4, v10
+; GFX10-NEXT: v_min_i32_e32 v19, -1, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v13, v13, v11
+; GFX10-NEXT: v_sub_nc_u32_e32 v11, v17, v11
+; GFX10-NEXT: s_mov_b32 s5, 0x80000000
+; GFX10-NEXT: v_max_i32_e32 v7, v10, v7
+; GFX10-NEXT: v_subrev_nc_u32_e32 v12, s5, v12
+; GFX10-NEXT: v_subrev_nc_u32_e32 v15, s5, v15
+; GFX10-NEXT: v_subrev_nc_u32_e32 v16, s5, v16
+; GFX10-NEXT: v_max_i32_e32 v8, v13, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v10, v23, v14
+; GFX10-NEXT: v_sub_nc_u32_e32 v13, v19, v14
+; GFX10-NEXT: v_max_i32_e32 v11, v11, v9
+; GFX10-NEXT: v_min_i32_e32 v5, v5, v12
+; GFX10-NEXT: v_min_i32_e32 v6, v6, v15
+; GFX10-NEXT: v_min_i32_e32 v7, v7, v16
+; GFX10-NEXT: v_min_i32_e32 v8, v8, v10
+; GFX10-NEXT: v_min_i32_e32 v9, v11, v13
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v5
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v6
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v7
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, v3, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v4, v4, v9
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <5 x i32> @llvm.ssub.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
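+
+; Note the subtract opcodes across targets: GFX6 and GFX8 only have
+; carry-writing 32-bit subtracts (v_sub_i32/v_sub_u32 with vcc), which
+; serializes the expansion on vcc, while GFX9 and GFX10 use the carry-less
+; v_sub_u32/v_sub_nc_u32 forms, so the scheduler is free to interleave the
+; per-element computations.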
+
+define amdgpu_ps <5 x i32> @s_ssubsat_v5i32(<5 x i32> inreg %lhs, <5 x i32> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s10, -2
+; GFX6-NEXT: s_cselect_b32 s12, s0, -1
+; GFX6-NEXT: s_sub_i32 s12, s12, s10
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s11, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s13, s0, -1
+; GFX6-NEXT: s_sub_i32 s13, s13, s11
+; GFX6-NEXT: s_cmp_gt_i32 s12, s5
+; GFX6-NEXT: s_cselect_b32 s5, s12, s5
+; GFX6-NEXT: s_cmp_lt_i32 s5, s13
+; GFX6-NEXT: s_cselect_b32 s5, s5, s13
+; GFX6-NEXT: s_sub_i32 s0, s0, s5
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s5, s1, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s10
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s12, s1, -1
+; GFX6-NEXT: s_sub_i32 s12, s12, s11
+; GFX6-NEXT: s_cmp_gt_i32 s5, s6
+; GFX6-NEXT: s_cselect_b32 s5, s5, s6
+; GFX6-NEXT: s_cmp_lt_i32 s5, s12
+; GFX6-NEXT: s_cselect_b32 s5, s5, s12
+; GFX6-NEXT: s_sub_i32 s1, s1, s5
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s5, s2, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s10
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s6, s2, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s11
+; GFX6-NEXT: s_cmp_gt_i32 s5, s7
+; GFX6-NEXT: s_cselect_b32 s5, s5, s7
+; GFX6-NEXT: s_cmp_lt_i32 s5, s6
+; GFX6-NEXT: s_cselect_b32 s5, s5, s6
+; GFX6-NEXT: s_sub_i32 s2, s2, s5
+; GFX6-NEXT: s_cmp_gt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s5, s3, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s10
+; GFX6-NEXT: s_cmp_lt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s6, s3, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s11
+; GFX6-NEXT: s_cmp_gt_i32 s5, s8
+; GFX6-NEXT: s_cselect_b32 s5, s5, s8
+; GFX6-NEXT: s_cmp_lt_i32 s5, s6
+; GFX6-NEXT: s_cselect_b32 s5, s5, s6
+; GFX6-NEXT: s_sub_i32 s3, s3, s5
+; GFX6-NEXT: s_cmp_gt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s5, s4, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s10
+; GFX6-NEXT: s_cmp_lt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s6, s4, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s11
+; GFX6-NEXT: s_cmp_gt_i32 s5, s9
+; GFX6-NEXT: s_cselect_b32 s5, s5, s9
+; GFX6-NEXT: s_cmp_lt_i32 s5, s6
+; GFX6-NEXT: s_cselect_b32 s5, s5, s6
+; GFX6-NEXT: s_sub_i32 s4, s4, s5
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, -1
+; GFX8-NEXT: s_brev_b32 s10, -2
+; GFX8-NEXT: s_cselect_b32 s12, s0, -1
+; GFX8-NEXT: s_sub_i32 s12, s12, s10
+; GFX8-NEXT: s_cmp_lt_i32 s0, -1
+; GFX8-NEXT: s_mov_b32 s11, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s13, s0, -1
+; GFX8-NEXT: s_sub_i32 s13, s13, s11
+; GFX8-NEXT: s_cmp_gt_i32 s12, s5
+; GFX8-NEXT: s_cselect_b32 s5, s12, s5
+; GFX8-NEXT: s_cmp_lt_i32 s5, s13
+; GFX8-NEXT: s_cselect_b32 s5, s5, s13
+; GFX8-NEXT: s_sub_i32 s0, s0, s5
+; GFX8-NEXT: s_cmp_gt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s5, s1, -1
+; GFX8-NEXT: s_sub_i32 s5, s5, s10
+; GFX8-NEXT: s_cmp_lt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s12, s1, -1
+; GFX8-NEXT: s_sub_i32 s12, s12, s11
+; GFX8-NEXT: s_cmp_gt_i32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_cmp_lt_i32 s5, s12
+; GFX8-NEXT: s_cselect_b32 s5, s5, s12
+; GFX8-NEXT: s_sub_i32 s1, s1, s5
+; GFX8-NEXT: s_cmp_gt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s5, s2, -1
+; GFX8-NEXT: s_sub_i32 s5, s5, s10
+; GFX8-NEXT: s_cmp_lt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s6, s2, -1
+; GFX8-NEXT: s_sub_i32 s6, s6, s11
+; GFX8-NEXT: s_cmp_gt_i32 s5, s7
+; GFX8-NEXT: s_cselect_b32 s5, s5, s7
+; GFX8-NEXT: s_cmp_lt_i32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_sub_i32 s2, s2, s5
+; GFX8-NEXT: s_cmp_gt_i32 s3, -1
+; GFX8-NEXT: s_cselect_b32 s5, s3, -1
+; GFX8-NEXT: s_sub_i32 s5, s5, s10
+; GFX8-NEXT: s_cmp_lt_i32 s3, -1
+; GFX8-NEXT: s_cselect_b32 s6, s3, -1
+; GFX8-NEXT: s_sub_i32 s6, s6, s11
+; GFX8-NEXT: s_cmp_gt_i32 s5, s8
+; GFX8-NEXT: s_cselect_b32 s5, s5, s8
+; GFX8-NEXT: s_cmp_lt_i32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_sub_i32 s3, s3, s5
+; GFX8-NEXT: s_cmp_gt_i32 s4, -1
+; GFX8-NEXT: s_cselect_b32 s5, s4, -1
+; GFX8-NEXT: s_sub_i32 s5, s5, s10
+; GFX8-NEXT: s_cmp_lt_i32 s4, -1
+; GFX8-NEXT: s_cselect_b32 s6, s4, -1
+; GFX8-NEXT: s_sub_i32 s6, s6, s11
+; GFX8-NEXT: s_cmp_gt_i32 s5, s9
+; GFX8-NEXT: s_cselect_b32 s5, s5, s9
+; GFX8-NEXT: s_cmp_lt_i32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_sub_i32 s4, s4, s5
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_brev_b32 s10, -2
+; GFX9-NEXT: s_cselect_b32 s12, s0, -1
+; GFX9-NEXT: s_sub_i32 s12, s12, s10
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_mov_b32 s11, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s13, s0, -1
+; GFX9-NEXT: s_sub_i32 s13, s13, s11
+; GFX9-NEXT: s_cmp_gt_i32 s12, s5
+; GFX9-NEXT: s_cselect_b32 s5, s12, s5
+; GFX9-NEXT: s_cmp_lt_i32 s5, s13
+; GFX9-NEXT: s_cselect_b32 s5, s5, s13
+; GFX9-NEXT: s_sub_i32 s0, s0, s5
+; GFX9-NEXT: s_cmp_gt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s5, s1, -1
+; GFX9-NEXT: s_sub_i32 s5, s5, s10
+; GFX9-NEXT: s_cmp_lt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s12, s1, -1
+; GFX9-NEXT: s_sub_i32 s12, s12, s11
+; GFX9-NEXT: s_cmp_gt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_cmp_lt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_sub_i32 s1, s1, s5
+; GFX9-NEXT: s_cmp_gt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s5, s2, -1
+; GFX9-NEXT: s_sub_i32 s5, s5, s10
+; GFX9-NEXT: s_cmp_lt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s6, s2, -1
+; GFX9-NEXT: s_sub_i32 s6, s6, s11
+; GFX9-NEXT: s_cmp_gt_i32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_sub_i32 s2, s2, s5
+; GFX9-NEXT: s_cmp_gt_i32 s3, -1
+; GFX9-NEXT: s_cselect_b32 s5, s3, -1
+; GFX9-NEXT: s_sub_i32 s5, s5, s10
+; GFX9-NEXT: s_cmp_lt_i32 s3, -1
+; GFX9-NEXT: s_cselect_b32 s6, s3, -1
+; GFX9-NEXT: s_sub_i32 s6, s6, s11
+; GFX9-NEXT: s_cmp_gt_i32 s5, s8
+; GFX9-NEXT: s_cselect_b32 s5, s5, s8
+; GFX9-NEXT: s_cmp_lt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_sub_i32 s3, s3, s5
+; GFX9-NEXT: s_cmp_gt_i32 s4, -1
+; GFX9-NEXT: s_cselect_b32 s5, s4, -1
+; GFX9-NEXT: s_sub_i32 s5, s5, s10
+; GFX9-NEXT: s_cmp_lt_i32 s4, -1
+; GFX9-NEXT: s_cselect_b32 s6, s4, -1
+; GFX9-NEXT: s_sub_i32 s6, s6, s11
+; GFX9-NEXT: s_cmp_gt_i32 s5, s9
+; GFX9-NEXT: s_cselect_b32 s5, s5, s9
+; GFX9-NEXT: s_cmp_lt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_sub_i32 s4, s4, s5
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: s_brev_b32 s10, -2
+; GFX10-NEXT: s_cselect_b32 s11, s0, -1
+; GFX10-NEXT: s_mov_b32 s12, 0x80000000
+; GFX10-NEXT: s_sub_i32 s11, s11, s10
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s13, s0, -1
+; GFX10-NEXT: s_sub_i32 s13, s13, s12
+; GFX10-NEXT: s_cmp_gt_i32 s11, s5
+; GFX10-NEXT: s_cselect_b32 s5, s11, s5
+; GFX10-NEXT: s_cmp_lt_i32 s5, s13
+; GFX10-NEXT: s_cselect_b32 s5, s5, s13
+; GFX10-NEXT: s_sub_i32 s0, s0, s5
+; GFX10-NEXT: s_cmp_gt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s5, s1, -1
+; GFX10-NEXT: s_sub_i32 s5, s5, s10
+; GFX10-NEXT: s_cmp_lt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s11, s1, -1
+; GFX10-NEXT: s_sub_i32 s11, s11, s12
+; GFX10-NEXT: s_cmp_gt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_cmp_lt_i32 s5, s11
+; GFX10-NEXT: s_cselect_b32 s5, s5, s11
+; GFX10-NEXT: s_sub_i32 s1, s1, s5
+; GFX10-NEXT: s_cmp_gt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s5, s2, -1
+; GFX10-NEXT: s_sub_i32 s5, s5, s10
+; GFX10-NEXT: s_cmp_lt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s6, s2, -1
+; GFX10-NEXT: s_sub_i32 s6, s6, s12
+; GFX10-NEXT: s_cmp_gt_i32 s5, s7
+; GFX10-NEXT: s_cselect_b32 s5, s5, s7
+; GFX10-NEXT: s_cmp_lt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_sub_i32 s2, s2, s5
+; GFX10-NEXT: s_cmp_gt_i32 s3, -1
+; GFX10-NEXT: s_cselect_b32 s5, s3, -1
+; GFX10-NEXT: s_sub_i32 s5, s5, s10
+; GFX10-NEXT: s_cmp_lt_i32 s3, -1
+; GFX10-NEXT: s_cselect_b32 s6, s3, -1
+; GFX10-NEXT: s_sub_i32 s6, s6, s12
+; GFX10-NEXT: s_cmp_gt_i32 s5, s8
+; GFX10-NEXT: s_cselect_b32 s5, s5, s8
+; GFX10-NEXT: s_cmp_lt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_sub_i32 s3, s3, s5
+; GFX10-NEXT: s_cmp_gt_i32 s4, -1
+; GFX10-NEXT: s_cselect_b32 s5, s4, -1
+; GFX10-NEXT: s_sub_i32 s5, s5, s10
+; GFX10-NEXT: s_cmp_lt_i32 s4, -1
+; GFX10-NEXT: s_cselect_b32 s6, s4, -1
+; GFX10-NEXT: s_sub_i32 s6, s6, s12
+; GFX10-NEXT: s_cmp_gt_i32 s5, s9
+; GFX10-NEXT: s_cselect_b32 s5, s5, s9
+; GFX10-NEXT: s_cmp_lt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_sub_i32 s4, s4, s5
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <5 x i32> @llvm.ssub.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
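+
+; In the scalar versions the two bound constants land in the first SGPRs
+; after the inreg arguments: s8/s9 for v4i32 above, s10/s11 here, and
+; s32/s33 in the v16i32 case below, since the 2*N vector arguments occupy
+; s0..s(2N-1).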
+
+define <16 x i32> @v_ssubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
+; GFX6-LABEL: v_ssubsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v32, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v32, vcc, s4, v32
+; GFX6-NEXT: v_max_i32_e32 v16, v32, v16
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v32, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v32, vcc, s5, v32
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v32
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v16
+; GFX6-NEXT: v_max_i32_e32 v16, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT: v_max_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_min_i32_e32 v17, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v17, vcc, s5, v17
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v16
+; GFX6-NEXT: v_max_i32_e32 v16, -1, v2
+; GFX6-NEXT: v_subrev_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT: v_min_i32_e32 v17, -1, v2
+; GFX6-NEXT: v_max_i32_e32 v16, v16, v18
+; GFX6-NEXT: v_subrev_i32_e32 v17, vcc, s5, v17
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v16
+; GFX6-NEXT: v_bfrev_b32_e32 v16, -2
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v3
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_mov_b32_e32 v18, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v3
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v4
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v4
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v20
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v5
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v5
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v21
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v6
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v6
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v22
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v7
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v7
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v8
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v8
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v24
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v8, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v9
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v9
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v25
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v10
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v10
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v26
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v10, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v11
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v11
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v27
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v11, vcc, v11, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v12
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v12
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v28
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v12, vcc, v12, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v13
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v13
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v29
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v13, vcc, v13, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v14
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v19, -1, v14
+; GFX6-NEXT: v_max_i32_e32 v17, v17, v30
+; GFX6-NEXT: v_sub_i32_e32 v19, vcc, v19, v18
+; GFX6-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX6-NEXT: v_sub_i32_e32 v14, vcc, v14, v17
+; GFX6-NEXT: v_max_i32_e32 v17, -1, v15
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, v17, v16
+; GFX6-NEXT: v_min_i32_e32 v17, -1, v15
+; GFX6-NEXT: v_sub_i32_e32 v17, vcc, v17, v18
+; GFX6-NEXT: v_max_i32_e32 v16, v16, v31
+; GFX6-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX6-NEXT: v_sub_i32_e32 v15, vcc, v15, v16
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_brev_b32 s4, -2
+; GFX8-NEXT: v_max_i32_e32 v32, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v32, vcc, s4, v32
+; GFX8-NEXT: v_max_i32_e32 v16, v32, v16
+; GFX8-NEXT: s_mov_b32 s5, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v32, -1, v0
+; GFX8-NEXT: v_subrev_u32_e32 v32, vcc, s5, v32
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v32
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v16
+; GFX8-NEXT: v_max_i32_e32 v16, -1, v1
+; GFX8-NEXT: v_subrev_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_max_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_min_i32_e32 v17, -1, v1
+; GFX8-NEXT: v_subrev_u32_e32 v17, vcc, s5, v17
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v16
+; GFX8-NEXT: v_max_i32_e32 v16, -1, v2
+; GFX8-NEXT: v_subrev_u32_e32 v16, vcc, s4, v16
+; GFX8-NEXT: v_min_i32_e32 v17, -1, v2
+; GFX8-NEXT: v_max_i32_e32 v16, v16, v18
+; GFX8-NEXT: v_subrev_u32_e32 v17, vcc, s5, v17
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v16
+; GFX8-NEXT: v_bfrev_b32_e32 v16, -2
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v3
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_mov_b32_e32 v18, 0x80000000
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v3
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, v3, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v4
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v4
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v20
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v4, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v5
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v5
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v21
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, v5, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v6
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v6
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v22
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, v6, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v7
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v7
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v7, vcc, v7, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v8
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v8
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v24
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, v8, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v9
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v9
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v25
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v9, vcc, v9, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v10
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v10
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v26
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v10, vcc, v10, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v11
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v11
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v27
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v11, vcc, v11, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v12
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v12
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v28
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v12, vcc, v12, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v13
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v13
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v29
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v13, vcc, v13, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v14
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v19, -1, v14
+; GFX8-NEXT: v_max_i32_e32 v17, v17, v30
+; GFX8-NEXT: v_sub_u32_e32 v19, vcc, v19, v18
+; GFX8-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX8-NEXT: v_sub_u32_e32 v14, vcc, v14, v17
+; GFX8-NEXT: v_max_i32_e32 v17, -1, v15
+; GFX8-NEXT: v_sub_u32_e32 v16, vcc, v17, v16
+; GFX8-NEXT: v_min_i32_e32 v17, -1, v15
+; GFX8-NEXT: v_sub_u32_e32 v17, vcc, v17, v18
+; GFX8-NEXT: v_max_i32_e32 v16, v16, v31
+; GFX8-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX8-NEXT: v_sub_u32_e32 v15, vcc, v15, v16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_brev_b32 s4, -2
+; GFX9-NEXT: v_max_i32_e32 v32, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v32, s4, v32
+; GFX9-NEXT: v_max_i32_e32 v16, v32, v16
+; GFX9-NEXT: s_mov_b32 s5, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v32, -1, v0
+; GFX9-NEXT: v_subrev_u32_e32 v32, s5, v32
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v32
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v16
+; GFX9-NEXT: v_max_i32_e32 v16, -1, v1
+; GFX9-NEXT: v_subrev_u32_e32 v16, s4, v16
+; GFX9-NEXT: v_max_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_min_i32_e32 v17, -1, v1
+; GFX9-NEXT: v_subrev_u32_e32 v17, s5, v17
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v16
+; GFX9-NEXT: v_max_i32_e32 v16, -1, v2
+; GFX9-NEXT: v_subrev_u32_e32 v16, s4, v16
+; GFX9-NEXT: v_min_i32_e32 v17, -1, v2
+; GFX9-NEXT: v_max_i32_e32 v16, v16, v18
+; GFX9-NEXT: v_subrev_u32_e32 v17, s5, v17
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v16
+; GFX9-NEXT: v_bfrev_b32_e32 v16, -2
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v3
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_mov_b32_e32 v18, 0x80000000
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v3
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v4
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v4
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v20
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v4, v4, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v5
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v5
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v21
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v5, v5, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v6
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v6
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v22
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v6, v6, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v7
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v7
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v7, v7, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v8
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v8
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v24
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v8, v8, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v9
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v9
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v25
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v9, v9, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v10
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v10
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v26
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v10, v10, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v11
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v11
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v27
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v11, v11, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v12
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v12
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v28
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v12, v12, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v13
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v13
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v29
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v13, v13, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v14
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v19, -1, v14
+; GFX9-NEXT: v_max_i32_e32 v17, v17, v30
+; GFX9-NEXT: v_sub_u32_e32 v19, v19, v18
+; GFX9-NEXT: v_min_i32_e32 v17, v17, v19
+; GFX9-NEXT: v_sub_u32_e32 v14, v14, v17
+; GFX9-NEXT: v_max_i32_e32 v17, -1, v15
+; GFX9-NEXT: v_sub_u32_e32 v16, v17, v16
+; GFX9-NEXT: v_min_i32_e32 v17, -1, v15
+; GFX9-NEXT: v_sub_u32_e32 v17, v17, v18
+; GFX9-NEXT: v_max_i32_e32 v16, v16, v31
+; GFX9-NEXT: v_min_i32_e32 v16, v16, v17
+; GFX9-NEXT: v_sub_u32_e32 v15, v15, v16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_max_i32_e32 v32, -1, v0
+; GFX10-NEXT: s_brev_b32 s4, -2
+; GFX10-NEXT: v_min_i32_e32 v33, -1, v0
+; GFX10-NEXT: s_mov_b32 s5, 0x80000000
+; GFX10-NEXT: v_max_i32_e32 v36, -1, v2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v35, s4, v32
+; GFX10-NEXT: v_max_i32_e32 v32, -1, v1
+; GFX10-NEXT: v_subrev_nc_u32_e32 v33, s5, v33
+; GFX10-NEXT: v_bfrev_b32_e32 v34, -2
+; GFX10-NEXT: v_subrev_nc_u32_e32 v36, s4, v36
+; GFX10-NEXT: v_max_i32_e32 v16, v35, v16
+; GFX10-NEXT: v_subrev_nc_u32_e32 v32, s4, v32
+; GFX10-NEXT: v_max_i32_e32 v39, -1, v3
+; GFX10-NEXT: v_min_i32_e32 v37, -1, v1
+; GFX10-NEXT: v_max_i32_e32 v18, v36, v18
+; GFX10-NEXT: v_min_i32_e32 v16, v16, v33
+; GFX10-NEXT: v_min_i32_e32 v33, -1, v2
+; GFX10-NEXT: v_max_i32_e32 v38, v32, v17
+; GFX10-NEXT: v_max_i32_e32 v17, -1, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v36, v39, v34
+; GFX10-NEXT: v_mov_b32_e32 v35, 0x80000000
+; GFX10-NEXT: v_subrev_nc_u32_e32 v32, s5, v33
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v16
+; GFX10-NEXT: v_min_i32_e32 v33, -1, v3
+; GFX10-NEXT: v_sub_nc_u32_e32 v17, v17, v34
+; GFX10-NEXT: v_subrev_nc_u32_e32 v37, s5, v37
+; GFX10-NEXT: v_min_i32_e32 v16, v18, v32
+; GFX10-NEXT: v_max_i32_e32 v19, v36, v19
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v33, v35
+; GFX10-NEXT: v_max_i32_e32 v17, v17, v20
+; GFX10-NEXT: v_min_i32_e32 v39, v38, v37
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v16
+; GFX10-NEXT: v_min_i32_e32 v16, -1, v4
+; GFX10-NEXT: v_min_i32_e32 v18, v19, v18
+; GFX10-NEXT: v_max_i32_e32 v19, -1, v5
+; GFX10-NEXT: v_max_i32_e32 v32, -1, v6
+; GFX10-NEXT: v_min_i32_e32 v33, -1, v5
+; GFX10-NEXT: v_sub_nc_u32_e32 v16, v16, v35
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v39
+; GFX10-NEXT: v_sub_nc_u32_e32 v19, v19, v34
+; GFX10-NEXT: v_sub_nc_u32_e32 v32, v32, v34
+; GFX10-NEXT: v_min_i32_e32 v36, -1, v6
+; GFX10-NEXT: v_min_i32_e32 v39, v17, v16
+; GFX10-NEXT: v_max_i32_e32 v17, -1, v7
+; GFX10-NEXT: v_min_i32_e32 v16, -1, v7
+; GFX10-NEXT: v_max_i32_e32 v19, v19, v21
+; GFX10-NEXT: v_sub_nc_u32_e32 v20, v33, v35
+; GFX10-NEXT: v_sub_nc_u32_e32 v4, v4, v39
+; GFX10-NEXT: v_sub_nc_u32_e32 v17, v17, v34
+; GFX10-NEXT: v_sub_nc_u32_e32 v21, v36, v35
+; GFX10-NEXT: v_max_i32_e32 v22, v32, v22
+; GFX10-NEXT: v_min_i32_e32 v38, v19, v20
+; GFX10-NEXT: v_max_i32_e32 v20, -1, v9
+; GFX10-NEXT: v_max_i32_e32 v39, -1, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v16, v16, v35
+; GFX10-NEXT: v_max_i32_e32 v17, v17, v23
+; GFX10-NEXT: v_min_i32_e32 v19, v22, v21
+; GFX10-NEXT: v_min_i32_e32 v21, -1, v9
+; GFX10-NEXT: v_sub_nc_u32_e32 v20, v20, v34
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, v3, v18
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v39, v34
+; GFX10-NEXT: v_max_i32_e32 v39, -1, v10
+; GFX10-NEXT: v_min_i32_e32 v16, v17, v16
+; GFX10-NEXT: v_min_i32_e32 v22, -1, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v5, v5, v38
+; GFX10-NEXT: v_sub_nc_u32_e32 v6, v6, v19
+; GFX10-NEXT: v_max_i32_e32 v18, v18, v24
+; GFX10-NEXT: v_max_i32_e32 v20, v20, v25
+; GFX10-NEXT: v_sub_nc_u32_e32 v19, v22, v35
+; GFX10-NEXT: v_sub_nc_u32_e32 v21, v21, v35
+; GFX10-NEXT: v_sub_nc_u32_e32 v7, v7, v16
+; GFX10-NEXT: v_max_i32_e32 v16, -1, v11
+; GFX10-NEXT: v_min_i32_e32 v38, -1, v10
+; GFX10-NEXT: v_sub_nc_u32_e32 v23, v39, v34
+; GFX10-NEXT: v_min_i32_e32 v17, v18, v19
+; GFX10-NEXT: v_min_i32_e32 v20, v20, v21
+; GFX10-NEXT: v_sub_nc_u32_e32 v16, v16, v34
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v38, v35
+; GFX10-NEXT: v_max_i32_e32 v19, v23, v26
+; GFX10-NEXT: v_sub_nc_u32_e32 v8, v8, v17
+; GFX10-NEXT: v_sub_nc_u32_e32 v9, v9, v20
+; GFX10-NEXT: v_max_i32_e32 v20, -1, v13
+; GFX10-NEXT: v_max_i32_e32 v16, v16, v27
+; GFX10-NEXT: v_min_i32_e32 v17, v19, v18
+; GFX10-NEXT: v_max_i32_e32 v19, -1, v12
+; GFX10-NEXT: v_max_i32_e32 v27, -1, v14
+; GFX10-NEXT: v_max_i32_e32 v23, -1, v15
+; GFX10-NEXT: v_min_i32_e32 v18, -1, v11
+; GFX10-NEXT: v_min_i32_e32 v21, -1, v13
+; GFX10-NEXT: v_sub_nc_u32_e32 v19, v19, v34
+; GFX10-NEXT: v_sub_nc_u32_e32 v20, v20, v34
+; GFX10-NEXT: v_min_i32_e32 v24, -1, v14
+; GFX10-NEXT: v_min_i32_e32 v25, -1, v15
+; GFX10-NEXT: v_sub_nc_u32_e32 v26, v23, v34
+; GFX10-NEXT: v_sub_nc_u32_e32 v10, v10, v17
+; GFX10-NEXT: v_min_i32_e32 v17, -1, v12
+; GFX10-NEXT: v_sub_nc_u32_e32 v27, v27, v34
+; GFX10-NEXT: v_sub_nc_u32_e32 v18, v18, v35
+; GFX10-NEXT: v_max_i32_e32 v19, v19, v28
+; GFX10-NEXT: v_sub_nc_u32_e32 v21, v21, v35
+; GFX10-NEXT: v_sub_nc_u32_e32 v17, v17, v35
+; GFX10-NEXT: v_max_i32_e32 v20, v20, v29
+; GFX10-NEXT: v_sub_nc_u32_e32 v24, v24, v35
+; GFX10-NEXT: v_max_i32_e32 v22, v27, v30
+; GFX10-NEXT: v_sub_nc_u32_e32 v25, v25, v35
+; GFX10-NEXT: v_max_i32_e32 v23, v26, v31
+; GFX10-NEXT: v_min_i32_e32 v16, v16, v18
+; GFX10-NEXT: v_min_i32_e32 v17, v19, v17
+; GFX10-NEXT: v_min_i32_e32 v18, v20, v21
+; GFX10-NEXT: v_min_i32_e32 v19, v22, v24
+; GFX10-NEXT: v_min_i32_e32 v20, v23, v25
+; GFX10-NEXT: v_sub_nc_u32_e32 v11, v11, v16
+; GFX10-NEXT: v_sub_nc_u32_e32 v12, v12, v17
+; GFX10-NEXT: v_sub_nc_u32_e32 v13, v13, v18
+; GFX10-NEXT: v_sub_nc_u32_e32 v14, v14, v19
+; GFX10-NEXT: v_sub_nc_u32_e32 v15, v15, v20
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
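+
+; For the wide vector the legalizer simply scalarizes: the same
+; clamp-and-subtract pattern is repeated sixteen times. After the first
+; three elements the two bounds also migrate from SGPRs into VGPRs
+; (v_bfrev_b32_e32 v16, -2 and v_mov_b32_e32 v18, 0x80000000 on
+; GFX6/GFX8/GFX9), matching what the v5i32 version above does for its
+; tail elements.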
+
+define amdgpu_ps <16 x i32> @s_ssubsat_v16i32(<16 x i32> inreg %lhs, <16 x i32> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s32, -2
+; GFX6-NEXT: s_cselect_b32 s34, s0, -1
+; GFX6-NEXT: s_sub_i32 s34, s34, s32
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s33, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s35, s0, -1
+; GFX6-NEXT: s_sub_i32 s35, s35, s33
+; GFX6-NEXT: s_cmp_gt_i32 s34, s16
+; GFX6-NEXT: s_cselect_b32 s16, s34, s16
+; GFX6-NEXT: s_cmp_lt_i32 s16, s35
+; GFX6-NEXT: s_cselect_b32 s16, s16, s35
+; GFX6-NEXT: s_sub_i32 s0, s0, s16
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s16, s1, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s34, s1, -1
+; GFX6-NEXT: s_sub_i32 s34, s34, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_cmp_lt_i32 s16, s34
+; GFX6-NEXT: s_cselect_b32 s16, s16, s34
+; GFX6-NEXT: s_sub_i32 s1, s1, s16
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s16, s2, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s17, s2, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s18
+; GFX6-NEXT: s_cselect_b32 s16, s16, s18
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s2, s2, s16
+; GFX6-NEXT: s_cmp_gt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s16, s3, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s17, s3, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s19
+; GFX6-NEXT: s_cselect_b32 s16, s16, s19
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s3, s3, s16
+; GFX6-NEXT: s_cmp_gt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s16, s4, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s17, s4, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s20
+; GFX6-NEXT: s_cselect_b32 s16, s16, s20
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s4, s4, s16
+; GFX6-NEXT: s_cmp_gt_i32 s5, -1
+; GFX6-NEXT: s_cselect_b32 s16, s5, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s5, -1
+; GFX6-NEXT: s_cselect_b32 s17, s5, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s21
+; GFX6-NEXT: s_cselect_b32 s16, s16, s21
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s5, s5, s16
+; GFX6-NEXT: s_cmp_gt_i32 s6, -1
+; GFX6-NEXT: s_cselect_b32 s16, s6, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s6, -1
+; GFX6-NEXT: s_cselect_b32 s17, s6, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s22
+; GFX6-NEXT: s_cselect_b32 s16, s16, s22
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s6, s6, s16
+; GFX6-NEXT: s_cmp_gt_i32 s7, -1
+; GFX6-NEXT: s_cselect_b32 s16, s7, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s7, -1
+; GFX6-NEXT: s_cselect_b32 s17, s7, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s23
+; GFX6-NEXT: s_cselect_b32 s16, s16, s23
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s7, s7, s16
+; GFX6-NEXT: s_cmp_gt_i32 s8, -1
+; GFX6-NEXT: s_cselect_b32 s16, s8, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s8, -1
+; GFX6-NEXT: s_cselect_b32 s17, s8, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s24
+; GFX6-NEXT: s_cselect_b32 s16, s16, s24
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s8, s8, s16
+; GFX6-NEXT: s_cmp_gt_i32 s9, -1
+; GFX6-NEXT: s_cselect_b32 s16, s9, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s9, -1
+; GFX6-NEXT: s_cselect_b32 s17, s9, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s25
+; GFX6-NEXT: s_cselect_b32 s16, s16, s25
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_gt_i32 s10, -1
+; GFX6-NEXT: s_cselect_b32 s16, s10, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s10, -1
+; GFX6-NEXT: s_cselect_b32 s17, s10, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s26
+; GFX6-NEXT: s_cselect_b32 s16, s16, s26
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s10, s10, s16
+; GFX6-NEXT: s_cmp_gt_i32 s11, -1
+; GFX6-NEXT: s_cselect_b32 s16, s11, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s11, -1
+; GFX6-NEXT: s_cselect_b32 s17, s11, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s27
+; GFX6-NEXT: s_cselect_b32 s16, s16, s27
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s11, s11, s16
+; GFX6-NEXT: s_cmp_gt_i32 s12, -1
+; GFX6-NEXT: s_cselect_b32 s16, s12, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s12, -1
+; GFX6-NEXT: s_cselect_b32 s17, s12, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s28
+; GFX6-NEXT: s_cselect_b32 s16, s16, s28
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s12, s12, s16
+; GFX6-NEXT: s_cmp_gt_i32 s13, -1
+; GFX6-NEXT: s_cselect_b32 s16, s13, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s13, -1
+; GFX6-NEXT: s_cselect_b32 s17, s13, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s29
+; GFX6-NEXT: s_cselect_b32 s16, s16, s29
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s13, s13, s16
+; GFX6-NEXT: s_cmp_gt_i32 s14, -1
+; GFX6-NEXT: s_cselect_b32 s16, s14, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s14, -1
+; GFX6-NEXT: s_cselect_b32 s17, s14, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s30
+; GFX6-NEXT: s_cselect_b32 s16, s16, s30
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s14, s14, s16
+; GFX6-NEXT: s_cmp_gt_i32 s15, -1
+; GFX6-NEXT: s_cselect_b32 s16, s15, -1
+; GFX6-NEXT: s_sub_i32 s16, s16, s32
+; GFX6-NEXT: s_cmp_lt_i32 s15, -1
+; GFX6-NEXT: s_cselect_b32 s17, s15, -1
+; GFX6-NEXT: s_sub_i32 s17, s17, s33
+; GFX6-NEXT: s_cmp_gt_i32 s16, s31
+; GFX6-NEXT: s_cselect_b32 s16, s16, s31
+; GFX6-NEXT: s_cmp_lt_i32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_sub_i32 s15, s15, s16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_gt_i32 s0, -1
+; GFX8-NEXT: s_brev_b32 s32, -2
+; GFX8-NEXT: s_cselect_b32 s34, s0, -1
+; GFX8-NEXT: s_sub_i32 s34, s34, s32
+; GFX8-NEXT: s_cmp_lt_i32 s0, -1
+; GFX8-NEXT: s_mov_b32 s33, 0x80000000
+; GFX8-NEXT: s_cselect_b32 s35, s0, -1
+; GFX8-NEXT: s_sub_i32 s35, s35, s33
+; GFX8-NEXT: s_cmp_gt_i32 s34, s16
+; GFX8-NEXT: s_cselect_b32 s16, s34, s16
+; GFX8-NEXT: s_cmp_lt_i32 s16, s35
+; GFX8-NEXT: s_cselect_b32 s16, s16, s35
+; GFX8-NEXT: s_sub_i32 s0, s0, s16
+; GFX8-NEXT: s_cmp_gt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s16, s1, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s1, -1
+; GFX8-NEXT: s_cselect_b32 s34, s1, -1
+; GFX8-NEXT: s_sub_i32 s34, s34, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_cmp_lt_i32 s16, s34
+; GFX8-NEXT: s_cselect_b32 s16, s16, s34
+; GFX8-NEXT: s_sub_i32 s1, s1, s16
+; GFX8-NEXT: s_cmp_gt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s16, s2, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s2, -1
+; GFX8-NEXT: s_cselect_b32 s17, s2, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s18
+; GFX8-NEXT: s_cselect_b32 s16, s16, s18
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s2, s2, s16
+; GFX8-NEXT: s_cmp_gt_i32 s3, -1
+; GFX8-NEXT: s_cselect_b32 s16, s3, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s3, -1
+; GFX8-NEXT: s_cselect_b32 s17, s3, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s19
+; GFX8-NEXT: s_cselect_b32 s16, s16, s19
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s3, s3, s16
+; GFX8-NEXT: s_cmp_gt_i32 s4, -1
+; GFX8-NEXT: s_cselect_b32 s16, s4, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s4, -1
+; GFX8-NEXT: s_cselect_b32 s17, s4, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s20
+; GFX8-NEXT: s_cselect_b32 s16, s16, s20
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s4, s4, s16
+; GFX8-NEXT: s_cmp_gt_i32 s5, -1
+; GFX8-NEXT: s_cselect_b32 s16, s5, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s5, -1
+; GFX8-NEXT: s_cselect_b32 s17, s5, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s21
+; GFX8-NEXT: s_cselect_b32 s16, s16, s21
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s5, s5, s16
+; GFX8-NEXT: s_cmp_gt_i32 s6, -1
+; GFX8-NEXT: s_cselect_b32 s16, s6, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s6, -1
+; GFX8-NEXT: s_cselect_b32 s17, s6, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s22
+; GFX8-NEXT: s_cselect_b32 s16, s16, s22
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s6, s6, s16
+; GFX8-NEXT: s_cmp_gt_i32 s7, -1
+; GFX8-NEXT: s_cselect_b32 s16, s7, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s7, -1
+; GFX8-NEXT: s_cselect_b32 s17, s7, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s23
+; GFX8-NEXT: s_cselect_b32 s16, s16, s23
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s7, s7, s16
+; GFX8-NEXT: s_cmp_gt_i32 s8, -1
+; GFX8-NEXT: s_cselect_b32 s16, s8, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s8, -1
+; GFX8-NEXT: s_cselect_b32 s17, s8, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s24
+; GFX8-NEXT: s_cselect_b32 s16, s16, s24
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s8, s8, s16
+; GFX8-NEXT: s_cmp_gt_i32 s9, -1
+; GFX8-NEXT: s_cselect_b32 s16, s9, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s9, -1
+; GFX8-NEXT: s_cselect_b32 s17, s9, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s25
+; GFX8-NEXT: s_cselect_b32 s16, s16, s25
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s9, s9, s16
+; GFX8-NEXT: s_cmp_gt_i32 s10, -1
+; GFX8-NEXT: s_cselect_b32 s16, s10, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s10, -1
+; GFX8-NEXT: s_cselect_b32 s17, s10, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s26
+; GFX8-NEXT: s_cselect_b32 s16, s16, s26
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s10, s10, s16
+; GFX8-NEXT: s_cmp_gt_i32 s11, -1
+; GFX8-NEXT: s_cselect_b32 s16, s11, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s11, -1
+; GFX8-NEXT: s_cselect_b32 s17, s11, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s27
+; GFX8-NEXT: s_cselect_b32 s16, s16, s27
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s11, s11, s16
+; GFX8-NEXT: s_cmp_gt_i32 s12, -1
+; GFX8-NEXT: s_cselect_b32 s16, s12, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s12, -1
+; GFX8-NEXT: s_cselect_b32 s17, s12, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s28
+; GFX8-NEXT: s_cselect_b32 s16, s16, s28
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s12, s12, s16
+; GFX8-NEXT: s_cmp_gt_i32 s13, -1
+; GFX8-NEXT: s_cselect_b32 s16, s13, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s13, -1
+; GFX8-NEXT: s_cselect_b32 s17, s13, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s29
+; GFX8-NEXT: s_cselect_b32 s16, s16, s29
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s13, s13, s16
+; GFX8-NEXT: s_cmp_gt_i32 s14, -1
+; GFX8-NEXT: s_cselect_b32 s16, s14, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s14, -1
+; GFX8-NEXT: s_cselect_b32 s17, s14, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s30
+; GFX8-NEXT: s_cselect_b32 s16, s16, s30
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s14, s14, s16
+; GFX8-NEXT: s_cmp_gt_i32 s15, -1
+; GFX8-NEXT: s_cselect_b32 s16, s15, -1
+; GFX8-NEXT: s_sub_i32 s16, s16, s32
+; GFX8-NEXT: s_cmp_lt_i32 s15, -1
+; GFX8-NEXT: s_cselect_b32 s17, s15, -1
+; GFX8-NEXT: s_sub_i32 s17, s17, s33
+; GFX8-NEXT: s_cmp_gt_i32 s16, s31
+; GFX8-NEXT: s_cselect_b32 s16, s16, s31
+; GFX8-NEXT: s_cmp_lt_i32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_sub_i32 s15, s15, s16
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_gt_i32 s0, -1
+; GFX9-NEXT: s_brev_b32 s32, -2
+; GFX9-NEXT: s_cselect_b32 s34, s0, -1
+; GFX9-NEXT: s_sub_i32 s34, s34, s32
+; GFX9-NEXT: s_cmp_lt_i32 s0, -1
+; GFX9-NEXT: s_mov_b32 s33, 0x80000000
+; GFX9-NEXT: s_cselect_b32 s35, s0, -1
+; GFX9-NEXT: s_sub_i32 s35, s35, s33
+; GFX9-NEXT: s_cmp_gt_i32 s34, s16
+; GFX9-NEXT: s_cselect_b32 s16, s34, s16
+; GFX9-NEXT: s_cmp_lt_i32 s16, s35
+; GFX9-NEXT: s_cselect_b32 s16, s16, s35
+; GFX9-NEXT: s_sub_i32 s0, s0, s16
+; GFX9-NEXT: s_cmp_gt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s16, s1, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s1, -1
+; GFX9-NEXT: s_cselect_b32 s34, s1, -1
+; GFX9-NEXT: s_sub_i32 s34, s34, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_cmp_lt_i32 s16, s34
+; GFX9-NEXT: s_cselect_b32 s16, s16, s34
+; GFX9-NEXT: s_sub_i32 s1, s1, s16
+; GFX9-NEXT: s_cmp_gt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s16, s2, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s2, -1
+; GFX9-NEXT: s_cselect_b32 s17, s2, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s18
+; GFX9-NEXT: s_cselect_b32 s16, s16, s18
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s2, s2, s16
+; GFX9-NEXT: s_cmp_gt_i32 s3, -1
+; GFX9-NEXT: s_cselect_b32 s16, s3, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s3, -1
+; GFX9-NEXT: s_cselect_b32 s17, s3, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s19
+; GFX9-NEXT: s_cselect_b32 s16, s16, s19
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s3, s3, s16
+; GFX9-NEXT: s_cmp_gt_i32 s4, -1
+; GFX9-NEXT: s_cselect_b32 s16, s4, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s4, -1
+; GFX9-NEXT: s_cselect_b32 s17, s4, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s20
+; GFX9-NEXT: s_cselect_b32 s16, s16, s20
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s4, s4, s16
+; GFX9-NEXT: s_cmp_gt_i32 s5, -1
+; GFX9-NEXT: s_cselect_b32 s16, s5, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s5, -1
+; GFX9-NEXT: s_cselect_b32 s17, s5, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s21
+; GFX9-NEXT: s_cselect_b32 s16, s16, s21
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s5, s5, s16
+; GFX9-NEXT: s_cmp_gt_i32 s6, -1
+; GFX9-NEXT: s_cselect_b32 s16, s6, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s6, -1
+; GFX9-NEXT: s_cselect_b32 s17, s6, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s22
+; GFX9-NEXT: s_cselect_b32 s16, s16, s22
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s6, s6, s16
+; GFX9-NEXT: s_cmp_gt_i32 s7, -1
+; GFX9-NEXT: s_cselect_b32 s16, s7, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s7, -1
+; GFX9-NEXT: s_cselect_b32 s17, s7, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s23
+; GFX9-NEXT: s_cselect_b32 s16, s16, s23
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s7, s7, s16
+; GFX9-NEXT: s_cmp_gt_i32 s8, -1
+; GFX9-NEXT: s_cselect_b32 s16, s8, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s8, -1
+; GFX9-NEXT: s_cselect_b32 s17, s8, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s24
+; GFX9-NEXT: s_cselect_b32 s16, s16, s24
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s8, s8, s16
+; GFX9-NEXT: s_cmp_gt_i32 s9, -1
+; GFX9-NEXT: s_cselect_b32 s16, s9, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s9, -1
+; GFX9-NEXT: s_cselect_b32 s17, s9, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s25
+; GFX9-NEXT: s_cselect_b32 s16, s16, s25
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s9, s9, s16
+; GFX9-NEXT: s_cmp_gt_i32 s10, -1
+; GFX9-NEXT: s_cselect_b32 s16, s10, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s10, -1
+; GFX9-NEXT: s_cselect_b32 s17, s10, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s26
+; GFX9-NEXT: s_cselect_b32 s16, s16, s26
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s10, s10, s16
+; GFX9-NEXT: s_cmp_gt_i32 s11, -1
+; GFX9-NEXT: s_cselect_b32 s16, s11, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s11, -1
+; GFX9-NEXT: s_cselect_b32 s17, s11, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s27
+; GFX9-NEXT: s_cselect_b32 s16, s16, s27
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s11, s11, s16
+; GFX9-NEXT: s_cmp_gt_i32 s12, -1
+; GFX9-NEXT: s_cselect_b32 s16, s12, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s12, -1
+; GFX9-NEXT: s_cselect_b32 s17, s12, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s28
+; GFX9-NEXT: s_cselect_b32 s16, s16, s28
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s12, s12, s16
+; GFX9-NEXT: s_cmp_gt_i32 s13, -1
+; GFX9-NEXT: s_cselect_b32 s16, s13, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s13, -1
+; GFX9-NEXT: s_cselect_b32 s17, s13, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s29
+; GFX9-NEXT: s_cselect_b32 s16, s16, s29
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s13, s13, s16
+; GFX9-NEXT: s_cmp_gt_i32 s14, -1
+; GFX9-NEXT: s_cselect_b32 s16, s14, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s14, -1
+; GFX9-NEXT: s_cselect_b32 s17, s14, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s30
+; GFX9-NEXT: s_cselect_b32 s16, s16, s30
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s14, s14, s16
+; GFX9-NEXT: s_cmp_gt_i32 s15, -1
+; GFX9-NEXT: s_cselect_b32 s16, s15, -1
+; GFX9-NEXT: s_sub_i32 s16, s16, s32
+; GFX9-NEXT: s_cmp_lt_i32 s15, -1
+; GFX9-NEXT: s_cselect_b32 s17, s15, -1
+; GFX9-NEXT: s_sub_i32 s17, s17, s33
+; GFX9-NEXT: s_cmp_gt_i32 s16, s31
+; GFX9-NEXT: s_cselect_b32 s16, s16, s31
+; GFX9-NEXT: s_cmp_lt_i32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_sub_i32 s15, s15, s16
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_gt_i32 s0, -1
+; GFX10-NEXT: s_brev_b32 s46, -2
+; GFX10-NEXT: s_cselect_b32 s33, s0, -1
+; GFX10-NEXT: s_mov_b32 s34, 0x80000000
+; GFX10-NEXT: s_sub_i32 s47, s33, s46
+; GFX10-NEXT: s_cmp_lt_i32 s0, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s35, s0, -1
+; GFX10-NEXT: s_sub_i32 s35, s35, s34
+; GFX10-NEXT: s_cmp_gt_i32 s47, s16
+; GFX10-NEXT: s_cselect_b32 s16, s47, s16
+; GFX10-NEXT: s_cmp_lt_i32 s16, s35
+; GFX10-NEXT: s_cselect_b32 s47, s16, s35
+; GFX10-NEXT: s_sub_i32 s0, s0, s47
+; GFX10-NEXT: s_cmp_gt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s16, s1, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s1, -1
+; GFX10-NEXT: s_cselect_b32 s33, s1, -1
+; GFX10-NEXT: s_sub_i32 s47, s33, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_cmp_lt_i32 s16, s47
+; GFX10-NEXT: s_cselect_b32 s47, s16, s47
+; GFX10-NEXT: s_sub_i32 s1, s1, s47
+; GFX10-NEXT: s_cmp_gt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s16, s2, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s2, -1
+; GFX10-NEXT: s_cselect_b32 s17, s2, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s18
+; GFX10-NEXT: s_cselect_b32 s16, s16, s18
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s2, s2, s16
+; GFX10-NEXT: s_cmp_gt_i32 s3, -1
+; GFX10-NEXT: s_cselect_b32 s16, s3, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s3, -1
+; GFX10-NEXT: s_cselect_b32 s17, s3, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s19
+; GFX10-NEXT: s_cselect_b32 s16, s16, s19
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s3, s3, s16
+; GFX10-NEXT: s_cmp_gt_i32 s4, -1
+; GFX10-NEXT: s_cselect_b32 s16, s4, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s4, -1
+; GFX10-NEXT: s_cselect_b32 s17, s4, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s20
+; GFX10-NEXT: s_cselect_b32 s16, s16, s20
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s4, s4, s16
+; GFX10-NEXT: s_cmp_gt_i32 s5, -1
+; GFX10-NEXT: s_cselect_b32 s16, s5, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s5, -1
+; GFX10-NEXT: s_cselect_b32 s17, s5, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s21
+; GFX10-NEXT: s_cselect_b32 s16, s16, s21
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s5, s5, s16
+; GFX10-NEXT: s_cmp_gt_i32 s6, -1
+; GFX10-NEXT: s_cselect_b32 s16, s6, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s6, -1
+; GFX10-NEXT: s_cselect_b32 s17, s6, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s22
+; GFX10-NEXT: s_cselect_b32 s16, s16, s22
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s6, s6, s16
+; GFX10-NEXT: s_cmp_gt_i32 s7, -1
+; GFX10-NEXT: s_cselect_b32 s16, s7, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s7, -1
+; GFX10-NEXT: s_cselect_b32 s17, s7, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s23
+; GFX10-NEXT: s_cselect_b32 s16, s16, s23
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s7, s7, s16
+; GFX10-NEXT: s_cmp_gt_i32 s8, -1
+; GFX10-NEXT: s_cselect_b32 s16, s8, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s8, -1
+; GFX10-NEXT: s_cselect_b32 s17, s8, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s24
+; GFX10-NEXT: s_cselect_b32 s16, s16, s24
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s8, s8, s16
+; GFX10-NEXT: s_cmp_gt_i32 s9, -1
+; GFX10-NEXT: s_cselect_b32 s16, s9, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s9, -1
+; GFX10-NEXT: s_cselect_b32 s17, s9, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s25
+; GFX10-NEXT: s_cselect_b32 s16, s16, s25
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s9, s9, s16
+; GFX10-NEXT: s_cmp_gt_i32 s10, -1
+; GFX10-NEXT: s_cselect_b32 s16, s10, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s10, -1
+; GFX10-NEXT: s_cselect_b32 s17, s10, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s26
+; GFX10-NEXT: s_cselect_b32 s16, s16, s26
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s10, s10, s16
+; GFX10-NEXT: s_cmp_gt_i32 s11, -1
+; GFX10-NEXT: s_cselect_b32 s16, s11, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s11, -1
+; GFX10-NEXT: s_cselect_b32 s17, s11, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s27
+; GFX10-NEXT: s_cselect_b32 s16, s16, s27
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s11, s11, s16
+; GFX10-NEXT: s_cmp_gt_i32 s12, -1
+; GFX10-NEXT: s_cselect_b32 s16, s12, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s12, -1
+; GFX10-NEXT: s_cselect_b32 s17, s12, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s28
+; GFX10-NEXT: s_cselect_b32 s16, s16, s28
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s12, s12, s16
+; GFX10-NEXT: s_cmp_gt_i32 s13, -1
+; GFX10-NEXT: s_cselect_b32 s16, s13, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s13, -1
+; GFX10-NEXT: s_cselect_b32 s17, s13, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s29
+; GFX10-NEXT: s_cselect_b32 s16, s16, s29
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s13, s13, s16
+; GFX10-NEXT: s_cmp_gt_i32 s14, -1
+; GFX10-NEXT: s_cselect_b32 s16, s14, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s14, -1
+; GFX10-NEXT: s_cselect_b32 s17, s14, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s30
+; GFX10-NEXT: s_cselect_b32 s16, s16, s30
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s14, s14, s16
+; GFX10-NEXT: s_cmp_gt_i32 s15, -1
+; GFX10-NEXT: s_cselect_b32 s16, s15, -1
+; GFX10-NEXT: s_sub_i32 s16, s16, s46
+; GFX10-NEXT: s_cmp_lt_i32 s15, -1
+; GFX10-NEXT: s_cselect_b32 s17, s15, -1
+; GFX10-NEXT: s_sub_i32 s17, s17, s34
+; GFX10-NEXT: s_cmp_gt_i32 s16, s31
+; GFX10-NEXT: s_cselect_b32 s16, s16, s31
+; GFX10-NEXT: s_cmp_lt_i32 s16, s17
+; GFX10-NEXT: s_cselect_b32 s16, s16, s17
+; GFX10-NEXT: s_sub_i32 s15, s15, s16
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
+
+define i16 @v_ssubsat_i16(i16 %lhs, i16 %rhs) {
+; GFX6-LABEL: v_ssubsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 0x7fffffff, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, 0x80000000, v3
+; GFX6-NEXT: v_max_i32_e32 v1, v2, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v3
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_max_i16_e32 v2, s4, v0
+; GFX8-NEXT: v_min_i16_e32 v3, s4, v0
+; GFX8-NEXT: v_subrev_u16_e32 v2, 0x7fff, v2
+; GFX8-NEXT: v_subrev_u16_e32 v3, 0x8000, v3
+; GFX8-NEXT: v_max_i16_e32 v1, v2, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v3
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: v_max_i16_e32 v2, s4, v0
+; GFX9-NEXT: v_min_i16_e32 v3, s4, v0
+; GFX9-NEXT: v_subrev_u16_e32 v2, 0x7fff, v2
+; GFX9-NEXT: v_subrev_u16_e32 v3, 0x8000, v3
+; GFX9-NEXT: v_max_i16_e32 v1, v2, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v3
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 0xffff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_max_i16_e64 v2, v0, s4
+; GFX10-NEXT: v_min_i16_e64 v3, v0, s4
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, v2, 0x7fff
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, v3, 0x8000
+; GFX10-NEXT: v_max_i16_e64 v1, v2, v1
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v3
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i16 @llvm.ssub.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
+
+define amdgpu_ps i16 @s_ssubsat_i16(i16 inreg %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: s_sub_i32 s2, s2, 0x7fffffff
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s3, s0, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, 0x80000000
+; GFX6-NEXT: s_cmp_gt_i32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_cmp_lt_i32 s1, s3
+; GFX6-NEXT: s_cselect_b32 s1, s1, s3
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sext_i32_i16 s2, s0
+; GFX8-NEXT: s_sext_i32_i16 s3, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s4, s2, s3
+; GFX8-NEXT: s_sub_i32 s4, s4, 0x7fff
+; GFX8-NEXT: s_cmp_lt_i32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_sub_i32 s2, s2, 0x8000
+; GFX8-NEXT: s_sext_i32_i16 s3, s4
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_lt_i32 s1, s2
+; GFX8-NEXT: s_cselect_b32 s1, s1, s2
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sext_i32_i16 s2, s0
+; GFX9-NEXT: s_sext_i32_i16 s3, 0xffff
+; GFX9-NEXT: s_cmp_gt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s4, s2, s3
+; GFX9-NEXT: s_sub_i32 s4, s4, 0x7fff
+; GFX9-NEXT: s_cmp_lt_i32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_sub_i32 s2, s2, 0x8000
+; GFX9-NEXT: s_sext_i32_i16 s3, s4
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_cmp_gt_i32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_sext_i32_i16 s1, s1
+; GFX9-NEXT: s_sext_i32_i16 s2, s2
+; GFX9-NEXT: s_cmp_lt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s1, s1, s2
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sext_i32_i16 s2, 0xffff
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_gt_i32 s3, s2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s4, s3, s2
+; GFX10-NEXT: s_sub_i32 s4, s4, 0x7fff
+; GFX10-NEXT: s_cmp_lt_i32 s3, s2
+; GFX10-NEXT: s_cselect_b32 s2, s3, s2
+; GFX10-NEXT: s_sext_i32_i16 s3, s4
+; GFX10-NEXT: s_sub_i32 s2, s2, 0x8000
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: s_sext_i32_i16 s2, s2
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_sext_i32_i16 s1, s1
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.ssub.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
+
+define amdgpu_ps half @ssubsat_i16_sv(i16 inreg %lhs, i16 %rhs) {
+; GFX6-LABEL: ssubsat_i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s1, s0, -1
+; GFX6-NEXT: s_sub_i32 s1, s1, 0x7fffffff
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_sub_i32 s2, s2, 0x80000000
+; GFX6-NEXT: v_max_i32_e32 v0, s1, v0
+; GFX6-NEXT: v_min_i32_e32 v0, s2, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sext_i32_i16 s1, s0
+; GFX8-NEXT: s_sext_i32_i16 s2, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s1, s2
+; GFX8-NEXT: s_cselect_b32 s3, s1, s2
+; GFX8-NEXT: s_sub_i32 s3, s3, 0x7fff
+; GFX8-NEXT: s_cmp_lt_i32 s1, s2
+; GFX8-NEXT: s_cselect_b32 s1, s1, s2
+; GFX8-NEXT: s_sub_i32 s1, s1, 0x8000
+; GFX8-NEXT: v_max_i16_e32 v0, s3, v0
+; GFX8-NEXT: v_min_i16_e32 v0, s1, v0
+; GFX8-NEXT: v_sub_u16_e32 v0, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sext_i32_i16 s1, s0
+; GFX9-NEXT: s_sext_i32_i16 s2, 0xffff
+; GFX9-NEXT: s_cmp_gt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s3, s1, s2
+; GFX9-NEXT: s_sub_i32 s3, s3, 0x7fff
+; GFX9-NEXT: s_cmp_lt_i32 s1, s2
+; GFX9-NEXT: s_cselect_b32 s1, s1, s2
+; GFX9-NEXT: s_sub_i32 s1, s1, 0x8000
+; GFX9-NEXT: v_max_i16_e32 v0, s3, v0
+; GFX9-NEXT: v_min_i16_e32 v0, s1, v0
+; GFX9-NEXT: v_sub_u16_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sext_i32_i16 s1, s0
+; GFX10-NEXT: s_sext_i32_i16 s2, 0xffff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_gt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s3, s1, s2
+; GFX10-NEXT: s_sub_i32 s3, s3, 0x7fff
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: v_max_i16_e64 v0, s3, v0
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_sub_i32 s1, s1, 0x8000
+; GFX10-NEXT: v_min_i16_e64 v0, v0, s1
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.ssub.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define amdgpu_ps half @ssubsat_i16_vs(i16 %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: ssubsat_i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_max_i32_e32 v1, -1, v0
+; GFX6-NEXT: v_min_i32_e32 v2, -1, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_subrev_i32_e32 v1, vcc, 0x7fffffff, v1
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, 0x80000000, v2
+; GFX6-NEXT: v_max_i32_e32 v1, s0, v1
+; GFX6-NEXT: v_min_i32_e32 v1, v1, v2
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s1, 0xffff
+; GFX8-NEXT: v_max_i16_e32 v1, s1, v0
+; GFX8-NEXT: v_subrev_u16_e32 v1, 0x7fff, v1
+; GFX8-NEXT: v_min_i16_e32 v2, s1, v0
+; GFX8-NEXT: v_subrev_u16_e32 v2, 0x8000, v2
+; GFX8-NEXT: v_max_i16_e32 v1, s0, v1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s1, 0xffff
+; GFX9-NEXT: v_max_i16_e32 v1, s1, v0
+; GFX9-NEXT: v_subrev_u16_e32 v1, 0x7fff, v1
+; GFX9-NEXT: v_min_i16_e32 v2, s1, v0
+; GFX9-NEXT: v_subrev_u16_e32 v2, 0x8000, v2
+; GFX9-NEXT: v_max_i16_e32 v1, s0, v1
+; GFX9-NEXT: v_min_i16_e32 v1, v1, v2
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mov_b32 s1, 0xffff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_max_i16_e64 v1, v0, s1
+; GFX10-NEXT: v_min_i16_e64 v2, v0, s1
+; GFX10-NEXT: v_sub_nc_u16_e64 v1, v1, 0x7fff
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, v2, 0x8000
+; GFX10-NEXT: v_max_i16_e64 v1, v1, s0
+; GFX10-NEXT: v_min_i16_e64 v1, v1, v2
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.ssub.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define <2 x i16> @v_ssubsat_v2i16(<2 x i16> %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: v_ssubsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v4, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s4, v4
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v5, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s5, v5
+; GFX6-NEXT: v_max_i32_e32 v2, v4, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX6-NEXT: v_max_i32_e32 v3, -1, v1
+; GFX6-NEXT: v_min_i32_e32 v4, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s4, v3
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s5, v4
+; GFX6-NEXT: v_max_i32_e32 v2, v3, v2
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v4
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s6, 0xffff
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v3, s6, v0
+; GFX8-NEXT: v_subrev_u16_e32 v3, s4, v3
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v4, s6, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: v_subrev_u16_e32 v4, s5, v4
+; GFX8-NEXT: v_max_i16_e32 v3, v3, v1
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v4
+; GFX8-NEXT: v_max_i16_e32 v4, s6, v2
+; GFX8-NEXT: v_min_i16_e32 v5, s6, v2
+; GFX8-NEXT: v_subrev_u16_e32 v4, s4, v4
+; GFX8-NEXT: v_subrev_u16_e32 v5, s5, v5
+; GFX8-NEXT: v_max_i16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v1, v1, v5
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v3
+; GFX8-NEXT: v_sub_u16_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v2, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v2, v2, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v3, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v3, v3, s5
+; GFX9-NEXT: v_pk_max_i16 v1, v2, v1
+; GFX9-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: s_movk_i32 s5, 0x7fff
+; GFX10-NEXT: v_pk_max_i16 v2, v0, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX10-NEXT: v_pk_min_i16 v3, v0, s4
+; GFX10-NEXT: s_mov_b32 s6, 0xffff8000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v2, v2, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s6, s6
+; GFX10-NEXT: v_pk_sub_i16 v3, v3, s4
+; GFX10-NEXT: v_pk_max_i16 v1, v2, v1
+; GFX10-NEXT: v_pk_min_i16 v1, v1, v3
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ ret <2 x i16> %result
+}
+
+define amdgpu_ps i32 @s_ssubsat_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: s_cselect_b32 s6, s0, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s4
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s7, s0, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s5
+; GFX6-NEXT: s_cmp_gt_i32 s6, s2
+; GFX6-NEXT: s_cselect_b32 s2, s6, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s7
+; GFX6-NEXT: s_cselect_b32 s2, s2, s7
+; GFX6-NEXT: s_sub_i32 s0, s0, s2
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s3, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s3, s1, -1
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s4, s1, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s5
+; GFX6-NEXT: s_cmp_gt_i32 s3, s2
+; GFX6-NEXT: s_cselect_b32 s2, s3, s2
+; GFX6-NEXT: s_cmp_lt_i32 s2, s4
+; GFX6-NEXT: s_cselect_b32 s2, s2, s4
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: s_mov_b32 s2, 0xffff
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s3, s1, 16
+; GFX8-NEXT: s_lshr_b32 s2, s0, 16
+; GFX8-NEXT: s_sext_i32_i16 s6, s0
+; GFX8-NEXT: s_sext_i32_i16 s7, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s6, s7
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s8, s6, s7
+; GFX8-NEXT: s_sub_i32 s8, s8, s4
+; GFX8-NEXT: s_cmp_lt_i32 s6, s7
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: s_cselect_b32 s6, s6, s7
+; GFX8-NEXT: s_sub_i32 s6, s6, s5
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_gt_i32 s8, s1
+; GFX8-NEXT: s_cselect_b32 s1, s8, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_lt_i32 s1, s6
+; GFX8-NEXT: s_cselect_b32 s1, s1, s6
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_sext_i32_i16 s1, s2
+; GFX8-NEXT: s_cmp_gt_i32 s1, s7
+; GFX8-NEXT: s_cselect_b32 s6, s1, s7
+; GFX8-NEXT: s_sub_i32 s4, s6, s4
+; GFX8-NEXT: s_cmp_lt_i32 s1, s7
+; GFX8-NEXT: s_cselect_b32 s1, s1, s7
+; GFX8-NEXT: s_sub_i32 s1, s1, s5
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s4, s3
+; GFX8-NEXT: s_cselect_b32 s3, s4, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s1, s1
+; GFX8-NEXT: s_cmp_lt_i32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_sub_i32 s1, s2, s1
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX9-NEXT: s_sext_i32_i16 s7, s4
+; GFX9-NEXT: s_sext_i32_i16 s5, s0
+; GFX9-NEXT: s_ashr_i32 s6, s0, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s8, s5, s7
+; GFX9-NEXT: s_cmp_gt_i32 s6, s4
+; GFX9-NEXT: s_movk_i32 s2, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s9, s6, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX9-NEXT: s_lshr_b32 s10, s2, 16
+; GFX9-NEXT: s_lshr_b32 s9, s8, 16
+; GFX9-NEXT: s_sub_i32 s2, s8, s2
+; GFX9-NEXT: s_sub_i32 s8, s9, s10
+; GFX9-NEXT: s_cmp_lt_i32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_i32 s6, s4
+; GFX9-NEXT: s_mov_b32 s3, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s4, s6, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s3
+; GFX9-NEXT: s_lshr_b32 s6, s3, 16
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_sub_i32 s3, s4, s3
+; GFX9-NEXT: s_sub_i32 s4, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s2
+; GFX9-NEXT: s_sext_i32_i16 s5, s1
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_ashr_i32 s1, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_gt_i32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s4, s1
+; GFX9-NEXT: s_sext_i32_i16 s2, s1
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_ashr_i32 s1, s1, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_lt_i32 s2, s4
+; GFX9-NEXT: s_cselect_b32 s2, s2, s4
+; GFX9-NEXT: s_cmp_lt_i32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s1, s1, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s2, s1
+; GFX9-NEXT: s_lshr_b32 s2, s0, 16
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_sub_i32 s1, s2, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, -1, -1
+; GFX10-NEXT: s_sext_i32_i16 s3, s0
+; GFX10-NEXT: s_sext_i32_i16 s5, s2
+; GFX10-NEXT: s_ashr_i32 s4, s0, 16
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s3, s5
+; GFX10-NEXT: s_movk_i32 s8, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s6, s3, s5
+; GFX10-NEXT: s_cmp_gt_i32 s4, s2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s7, s4, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s8, s8
+; GFX10-NEXT: s_lshr_b32 s8, s6, 16
+; GFX10-NEXT: s_lshr_b32 s9, s7, 16
+; GFX10-NEXT: s_sub_i32 s6, s6, s7
+; GFX10-NEXT: s_sub_i32 s7, s8, s9
+; GFX10-NEXT: s_cmp_lt_i32 s3, s5
+; GFX10-NEXT: s_cselect_b32 s3, s3, s5
+; GFX10-NEXT: s_cmp_lt_i32 s4, s2
+; GFX10-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX10-NEXT: s_cselect_b32 s2, s4, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s5, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s3, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s6, s7
+; GFX10-NEXT: s_lshr_b32 s5, s2, 16
+; GFX10-NEXT: s_lshr_b32 s6, s4, 16
+; GFX10-NEXT: s_sub_i32 s2, s2, s4
+; GFX10-NEXT: s_sub_i32 s4, s5, s6
+; GFX10-NEXT: s_sext_i32_i16 s5, s3
+; GFX10-NEXT: s_sext_i32_i16 s6, s1
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_ashr_i32 s1, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s5, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: s_sext_i32_i16 s4, s2
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s5, s1
+; GFX10-NEXT: s_sext_i32_i16 s3, s1
+; GFX10-NEXT: s_ashr_i32 s1, s1, 16
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_cmp_lt_i32 s1, s2
+; GFX10-NEXT: s_cselect_b32 s1, s1, s2
+; GFX10-NEXT: s_lshr_b32 s2, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s3, s1
+; GFX10-NEXT: s_lshr_b32 s3, s1, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_sub_i32 s1, s2, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to i32
+ ret i32 %cast
+}
+
+define amdgpu_ps float @ssubsat_v2i16_sv(<2 x i16> inreg %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: ssubsat_v2i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s2, -2
+; GFX6-NEXT: s_cselect_b32 s4, s0, -1
+; GFX6-NEXT: s_sub_i32 s4, s4, s2
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_mov_b32 s3, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s5, s0, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s3
+; GFX6-NEXT: v_max_i32_e32 v0, s4, v0
+; GFX6-NEXT: v_min_i32_e32 v0, s5, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s1, s0, -1
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_cselect_b32 s2, s0, -1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_sub_i32 s2, s2, s3
+; GFX6-NEXT: v_max_i32_e32 v1, s1, v1
+; GFX6-NEXT: v_min_i32_e32 v1, s2, v1
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s0, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_v2i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: s_sext_i32_i16 s4, s0
+; GFX8-NEXT: s_sext_i32_i16 s5, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s4, s5
+; GFX8-NEXT: s_movk_i32 s2, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s6, s4, s5
+; GFX8-NEXT: s_sub_i32 s6, s6, s2
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_mov_b32 s3, 0x8000
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_sub_i32 s4, s4, s3
+; GFX8-NEXT: v_max_i16_e32 v1, s6, v0
+; GFX8-NEXT: v_min_i16_e32 v1, s4, v1
+; GFX8-NEXT: s_sext_i32_i16 s4, s1
+; GFX8-NEXT: s_cmp_gt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s6, s4, s5
+; GFX8-NEXT: s_sub_i32 s2, s6, s2
+; GFX8-NEXT: s_cmp_lt_i32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: v_mov_b32_e32 v2, s2
+; GFX8-NEXT: v_max_i16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: s_sub_i32 s3, s4, s3
+; GFX8-NEXT: v_min_i16_e32 v0, s3, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_sub_u16_e32 v1, s0, v1
+; GFX8-NEXT: v_sub_u16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_v2i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, -1, -1
+; GFX9-NEXT: s_sext_i32_i16 s6, s3
+; GFX9-NEXT: s_sext_i32_i16 s4, s0
+; GFX9-NEXT: s_ashr_i32 s5, s0, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s7, s4, s6
+; GFX9-NEXT: s_cmp_gt_i32 s5, s3
+; GFX9-NEXT: s_movk_i32 s1, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s8, s5, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s1
+; GFX9-NEXT: s_lshr_b32 s9, s1, 16
+; GFX9-NEXT: s_lshr_b32 s8, s7, 16
+; GFX9-NEXT: s_sub_i32 s1, s7, s1
+; GFX9-NEXT: s_sub_i32 s7, s8, s9
+; GFX9-NEXT: s_cmp_lt_i32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_cmp_lt_i32 s5, s3
+; GFX9-NEXT: s_mov_b32 s2, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s3, s5, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX9-NEXT: s_lshr_b32 s5, s2, 16
+; GFX9-NEXT: s_lshr_b32 s4, s3, 16
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s7
+; GFX9-NEXT: s_sub_i32 s2, s3, s2
+; GFX9-NEXT: s_sub_i32 s3, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX9-NEXT: v_pk_max_i16 v0, s1, v0
+; GFX9-NEXT: v_pk_min_i16 v0, v0, s2
+; GFX9-NEXT: v_pk_sub_i16 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_v2i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, -1, -1
+; GFX10-NEXT: s_sext_i32_i16 s2, s0
+; GFX10-NEXT: s_sext_i32_i16 s4, s1
+; GFX10-NEXT: s_ashr_i32 s3, s0, 16
+; GFX10-NEXT: s_ashr_i32 s1, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s2, s4
+; GFX10-NEXT: s_movk_i32 s7, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s5, s2, s4
+; GFX10-NEXT: s_cmp_gt_i32 s3, s1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s6, s3, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s7, s7
+; GFX10-NEXT: s_lshr_b32 s7, s5, 16
+; GFX10-NEXT: s_lshr_b32 s8, s6, 16
+; GFX10-NEXT: s_sub_i32 s5, s5, s6
+; GFX10-NEXT: s_sub_i32 s6, s7, s8
+; GFX10-NEXT: s_cmp_lt_i32 s2, s4
+; GFX10-NEXT: s_cselect_b32 s2, s2, s4
+; GFX10-NEXT: s_cmp_lt_i32 s3, s1
+; GFX10-NEXT: s_mov_b32 s4, 0xffff8000
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s4, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s2, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s5, s6
+; GFX10-NEXT: s_lshr_b32 s4, s1, 16
+; GFX10-NEXT: s_lshr_b32 s5, s3, 16
+; GFX10-NEXT: v_pk_max_i16 v0, s2, v0
+; GFX10-NEXT: s_sub_i32 s1, s1, s3
+; GFX10-NEXT: s_sub_i32 s2, s4, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s2
+; GFX10-NEXT: v_pk_min_i16 v0, v0, s1
+; GFX10-NEXT: v_pk_sub_i16 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @ssubsat_v2i16_vs(<2 x i16> %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: ssubsat_v2i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_brev_b32 s2, -2
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s2, v2
+; GFX6-NEXT: s_mov_b32 s3, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v3, -1, v0
+; GFX6-NEXT: v_max_i32_e32 v2, s0, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s3, v3
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_max_i32_e32 v2, -1, v1
+; GFX6-NEXT: v_min_i32_e32 v3, -1, v1
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s2, v2
+; GFX6-NEXT: v_max_i32_e32 v2, s0, v2
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s3, v3
+; GFX6-NEXT: v_min_i32_e32 v2, v2, v3
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_v2i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: s_movk_i32 s2, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v2, s4, v0
+; GFX8-NEXT: v_subrev_u16_e32 v2, s2, v2
+; GFX8-NEXT: s_mov_b32 s3, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v3, s4, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_subrev_u16_e32 v3, s3, v3
+; GFX8-NEXT: v_max_i16_e32 v2, s0, v2
+; GFX8-NEXT: v_min_i16_e32 v2, v2, v3
+; GFX8-NEXT: v_max_i16_e32 v3, s4, v1
+; GFX8-NEXT: v_min_i16_e32 v4, s4, v1
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: v_subrev_u16_e32 v3, s2, v3
+; GFX8-NEXT: v_subrev_u16_e32 v4, s3, v4
+; GFX8-NEXT: v_max_i16_e32 v3, s1, v3
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v4
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v2
+; GFX8-NEXT: v_sub_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_v2i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_movk_i32 s1, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, -1, -1
+; GFX9-NEXT: s_mov_b32 s2, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s1
+; GFX9-NEXT: v_pk_max_i16 v1, v0, s3
+; GFX9-NEXT: v_pk_sub_i16 v1, v1, s1
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX9-NEXT: v_pk_min_i16 v2, v0, s3
+; GFX9-NEXT: v_pk_sub_i16 v2, v2, s2
+; GFX9-NEXT: v_pk_max_i16 v1, v1, s0
+; GFX9-NEXT: v_pk_min_i16 v1, v1, v2
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_v2i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, -1, -1
+; GFX10-NEXT: s_movk_i32 s2, 0x7fff
+; GFX10-NEXT: v_pk_max_i16 v1, v0, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s2
+; GFX10-NEXT: v_pk_min_i16 v2, v0, s1
+; GFX10-NEXT: s_mov_b32 s3, 0xffff8000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v1, v1, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s3, s3
+; GFX10-NEXT: v_pk_sub_i16 v2, v2, s1
+; GFX10-NEXT: v_pk_max_i16 v1, v1, s0
+; GFX10-NEXT: v_pk_min_i16 v1, v1, v2
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
+
+; FIXME: v3i16 insert/extract
+; define <3 x i16> @v_ssubsat_v3i16(<3 x i16> %lhs, <3 x i16> %rhs) {
+; %result = call <3 x i16> @llvm.ssub.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+; define amdgpu_ps <3 x i16> @s_ssubsat_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs) {
+; %result = call <3 x i16> @llvm.ssub.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+define <2 x float> @v_ssubsat_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; GFX6-LABEL: v_ssubsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v8, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, s4, v8
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: v_max_i32_e32 v4, v8, v4
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v10
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s4, v5
+; GFX6-NEXT: v_min_i32_e32 v8, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_max_i32_e32 v4, v5, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v8
+; GFX6-NEXT: v_bfrev_b32_e32 v9, -2
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v2
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v9
+; GFX6-NEXT: v_subrev_i32_e32 v6, vcc, s5, v6
+; GFX6-NEXT: v_max_i32_e32 v4, v5, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v6
+; GFX6-NEXT: v_max_i32_e32 v5, -1, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_mov_b32_e32 v11, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v6, -1, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v7
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v9
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v11
+; GFX6-NEXT: v_max_i32_e32 v4, v5, v4
+; GFX6-NEXT: v_min_i32_e32 v4, v4, v6
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s6, 0xffff
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v6, s6, v0
+; GFX8-NEXT: v_subrev_u16_e32 v6, s4, v6
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v7, s6, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_subrev_u16_e32 v7, s5, v7
+; GFX8-NEXT: v_max_i16_e32 v6, v6, v2
+; GFX8-NEXT: v_min_i16_e32 v6, v6, v7
+; GFX8-NEXT: v_max_i16_e32 v7, s6, v4
+; GFX8-NEXT: v_min_i16_e32 v8, s6, v4
+; GFX8-NEXT: v_subrev_u16_e32 v7, s4, v7
+; GFX8-NEXT: v_max_i16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v7, s6, v1
+; GFX8-NEXT: v_subrev_u16_e32 v8, s5, v8
+; GFX8-NEXT: v_min_i16_e32 v2, v2, v8
+; GFX8-NEXT: v_subrev_u16_e32 v7, s4, v7
+; GFX8-NEXT: v_min_i16_e32 v8, s6, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX8-NEXT: v_subrev_u16_e32 v8, s5, v8
+; GFX8-NEXT: v_max_i16_e32 v7, v7, v3
+; GFX8-NEXT: v_min_i16_e32 v7, v7, v8
+; GFX8-NEXT: v_max_i16_e32 v8, s6, v5
+; GFX8-NEXT: v_min_i16_e32 v9, -1, v5
+; GFX8-NEXT: v_subrev_u16_e32 v8, s4, v8
+; GFX8-NEXT: v_subrev_u16_e32 v9, s5, v9
+; GFX8-NEXT: v_max_i16_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v9
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v6
+; GFX8-NEXT: v_sub_u16_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_sub_u16_e32 v1, v1, v7
+; GFX8-NEXT: v_sub_u16_sdwa v2, v5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v4, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, v4, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v5, v0, s6
+; GFX9-NEXT: v_pk_max_i16 v2, v4, v2
+; GFX9-NEXT: v_pk_sub_i16 v5, v5, s5
+; GFX9-NEXT: v_pk_min_i16 v2, v2, v5
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v2
+; GFX9-NEXT: v_pk_max_i16 v2, v1, s6
+; GFX9-NEXT: v_pk_sub_i16 v2, v2, s4
+; GFX9-NEXT: v_pk_min_i16 v4, v1, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, v4, s5
+; GFX9-NEXT: v_pk_max_i16 v2, v2, v3
+; GFX9-NEXT: v_pk_min_i16 v2, v2, v4
+; GFX9-NEXT: v_pk_sub_i16 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, -1, -1
+; GFX10-NEXT: s_movk_i32 s4, 0x7fff
+; GFX10-NEXT: v_pk_max_i16 v4, v0, s5
+; GFX10-NEXT: v_pk_max_i16 v5, v1, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX10-NEXT: v_pk_min_i16 v6, v0, s5
+; GFX10-NEXT: v_pk_min_i16 v7, v1, s5
+; GFX10-NEXT: v_pk_sub_i16 v4, v4, s4
+; GFX10-NEXT: v_pk_sub_i16 v5, v5, s4
+; GFX10-NEXT: s_mov_b32 s6, 0xffff8000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX10-NEXT: v_pk_max_i16 v11, v4, v2
+; GFX10-NEXT: v_pk_sub_i16 v6, v6, s6
+; GFX10-NEXT: v_pk_sub_i16 v4, v7, s6
+; GFX10-NEXT: v_pk_max_i16 v3, v5, v3
+; GFX10-NEXT: v_pk_min_i16 v2, v11, v6
+; GFX10-NEXT: v_pk_min_i16 v3, v3, v4
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v2
+; GFX10-NEXT: v_pk_sub_i16 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x i32> @s_ssubsat_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s8, -2
+; GFX6-NEXT: s_cselect_b32 s10, s0, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s8
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s9, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s11, s0, -1
+; GFX6-NEXT: s_sub_i32 s11, s11, s9
+; GFX6-NEXT: s_cmp_gt_i32 s10, s4
+; GFX6-NEXT: s_cselect_b32 s4, s10, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s11
+; GFX6-NEXT: s_cselect_b32 s4, s4, s11
+; GFX6-NEXT: s_sub_i32 s0, s0, s4
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s4, s5, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s5, s1, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s10, s1, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s9
+; GFX6-NEXT: s_cmp_gt_i32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s10
+; GFX6-NEXT: s_cselect_b32 s4, s4, s10
+; GFX6-NEXT: s_sub_i32 s1, s1, s4
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s4, s6, 16
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s5, s2, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s6, s2, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s9
+; GFX6-NEXT: s_cmp_gt_i32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s6
+; GFX6-NEXT: s_cselect_b32 s4, s4, s6
+; GFX6-NEXT: s_sub_i32 s2, s2, s4
+; GFX6-NEXT: s_ashr_i32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s7, 16
+; GFX6-NEXT: s_cmp_gt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s5, s3, -1
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_cmp_lt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s6, s3, -1
+; GFX6-NEXT: s_sub_i32 s6, s6, s9
+; GFX6-NEXT: s_cmp_gt_i32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_cmp_lt_i32 s4, s6
+; GFX6-NEXT: s_cselect_b32 s4, s4, s6
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_ashr_i32 s3, s3, 16
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_and_b32 s2, s3, s4
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s6, s2, 16
+; GFX8-NEXT: s_lshr_b32 s7, s3, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 16
+; GFX8-NEXT: s_lshr_b32 s5, s1, 16
+; GFX8-NEXT: s_sext_i32_i16 s10, s0
+; GFX8-NEXT: s_sext_i32_i16 s11, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s10, s11
+; GFX8-NEXT: s_movk_i32 s8, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s12, s10, s11
+; GFX8-NEXT: s_sub_i32 s12, s12, s8
+; GFX8-NEXT: s_cmp_lt_i32 s10, s11
+; GFX8-NEXT: s_mov_b32 s9, 0x8000
+; GFX8-NEXT: s_cselect_b32 s10, s10, s11
+; GFX8-NEXT: s_sub_i32 s10, s10, s9
+; GFX8-NEXT: s_sext_i32_i16 s12, s12
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_gt_i32 s12, s2
+; GFX8-NEXT: s_cselect_b32 s2, s12, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_sext_i32_i16 s10, s10
+; GFX8-NEXT: s_cmp_lt_i32 s2, s10
+; GFX8-NEXT: s_cselect_b32 s2, s2, s10
+; GFX8-NEXT: s_sub_i32 s0, s0, s2
+; GFX8-NEXT: s_sext_i32_i16 s2, s4
+; GFX8-NEXT: s_cmp_gt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s10, s2, s11
+; GFX8-NEXT: s_sub_i32 s10, s10, s8
+; GFX8-NEXT: s_cmp_lt_i32 s2, s11
+; GFX8-NEXT: s_cselect_b32 s2, s2, s11
+; GFX8-NEXT: s_sub_i32 s2, s2, s9
+; GFX8-NEXT: s_sext_i32_i16 s10, s10
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_gt_i32 s10, s6
+; GFX8-NEXT: s_cselect_b32 s6, s10, s6
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s2, s2
+; GFX8-NEXT: s_cmp_lt_i32 s6, s2
+; GFX8-NEXT: s_cselect_b32 s2, s6, s2
+; GFX8-NEXT: s_sub_i32 s2, s4, s2
+; GFX8-NEXT: s_sext_i32_i16 s4, s1
+; GFX8-NEXT: s_cmp_gt_i32 s4, s11
+; GFX8-NEXT: s_cselect_b32 s6, s4, s11
+; GFX8-NEXT: s_sub_i32 s6, s6, s8
+; GFX8-NEXT: s_cmp_lt_i32 s4, s11
+; GFX8-NEXT: s_cselect_b32 s4, s4, s11
+; GFX8-NEXT: s_sub_i32 s4, s4, s9
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s6, s3
+; GFX8-NEXT: s_cselect_b32 s3, s6, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_lt_i32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_sub_i32 s1, s1, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s5
+; GFX8-NEXT: s_cmp_gt_i32 s3, s11
+; GFX8-NEXT: s_cselect_b32 s4, s3, s11
+; GFX8-NEXT: s_sub_i32 s4, s4, s8
+; GFX8-NEXT: s_cmp_lt_i32 s3, s11
+; GFX8-NEXT: s_cselect_b32 s3, s3, s11
+; GFX8-NEXT: s_sub_i32 s3, s3, s9
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s6, s7
+; GFX8-NEXT: s_cmp_gt_i32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_lt_i32 s4, s3
+; GFX8-NEXT: s_cselect_b32 s3, s4, s3
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_sub_i32 s3, s5, s3
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s2
+; GFX8-NEXT: s_bfe_u32 s2, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX9-NEXT: s_sext_i32_i16 s9, s6
+; GFX9-NEXT: s_sext_i32_i16 s7, s0
+; GFX9-NEXT: s_ashr_i32 s8, s0, 16
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_cmp_gt_i32 s7, s9
+; GFX9-NEXT: s_cselect_b32 s10, s7, s9
+; GFX9-NEXT: s_cmp_gt_i32 s8, s6
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s11, s8, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: s_lshr_b32 s11, s10, 16
+; GFX9-NEXT: s_lshr_b32 s12, s4, 16
+; GFX9-NEXT: s_sub_i32 s10, s10, s4
+; GFX9-NEXT: s_sub_i32 s11, s11, s12
+; GFX9-NEXT: s_cmp_lt_i32 s7, s9
+; GFX9-NEXT: s_cselect_b32 s7, s7, s9
+; GFX9-NEXT: s_cmp_lt_i32 s8, s6
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s8, s8, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s11
+; GFX9-NEXT: s_lshr_b32 s8, s7, 16
+; GFX9-NEXT: s_lshr_b32 s11, s5, 16
+; GFX9-NEXT: s_sub_i32 s7, s7, s5
+; GFX9-NEXT: s_sub_i32 s8, s8, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX9-NEXT: s_sext_i32_i16 s8, s10
+; GFX9-NEXT: s_sext_i32_i16 s13, s2
+; GFX9-NEXT: s_ashr_i32 s10, s10, 16
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_cmp_gt_i32 s8, s13
+; GFX9-NEXT: s_cselect_b32 s8, s8, s13
+; GFX9-NEXT: s_cmp_gt_i32 s10, s2
+; GFX9-NEXT: s_cselect_b32 s2, s10, s2
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s8, s2
+; GFX9-NEXT: s_sext_i32_i16 s8, s2
+; GFX9-NEXT: s_sext_i32_i16 s10, s7
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_ashr_i32 s7, s7, 16
+; GFX9-NEXT: s_cmp_lt_i32 s8, s10
+; GFX9-NEXT: s_cselect_b32 s8, s8, s10
+; GFX9-NEXT: s_cmp_lt_i32 s2, s7
+; GFX9-NEXT: s_cselect_b32 s2, s2, s7
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s8, s2
+; GFX9-NEXT: s_lshr_b32 s7, s0, 16
+; GFX9-NEXT: s_lshr_b32 s8, s2, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s2
+; GFX9-NEXT: s_sub_i32 s2, s7, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX9-NEXT: s_sext_i32_i16 s2, s1
+; GFX9-NEXT: s_ashr_i32 s7, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s2, s9
+; GFX9-NEXT: s_cselect_b32 s8, s2, s9
+; GFX9-NEXT: s_cmp_gt_i32 s7, s6
+; GFX9-NEXT: s_cselect_b32 s10, s7, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s10
+; GFX9-NEXT: s_lshr_b32 s10, s8, 16
+; GFX9-NEXT: s_sub_i32 s4, s8, s4
+; GFX9-NEXT: s_sub_i32 s8, s10, s12
+; GFX9-NEXT: s_cmp_lt_i32 s2, s9
+; GFX9-NEXT: s_cselect_b32 s2, s2, s9
+; GFX9-NEXT: s_cmp_lt_i32 s7, s6
+; GFX9-NEXT: s_cselect_b32 s6, s7, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s6
+; GFX9-NEXT: s_lshr_b32 s6, s2, 16
+; GFX9-NEXT: s_sub_i32 s2, s2, s5
+; GFX9-NEXT: s_sub_i32 s5, s6, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s6, s3
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_cmp_gt_i32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s4, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s5, s3
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_sext_i32_i16 s5, s2
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s2, s2, 16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_lt_i32 s3, s2
+; GFX9-NEXT: s_cselect_b32 s2, s3, s2
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s4, s2
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_sub_i32 s1, s1, s2
+; GFX9-NEXT: s_sub_i32 s2, s3, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: s_sext_i32_i16 s5, s0
+; GFX10-NEXT: s_sext_i32_i16 s7, s4
+; GFX10-NEXT: s_ashr_i32 s6, s0, 16
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_gt_i32 s5, s7
+; GFX10-NEXT: s_movk_i32 s10, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s8, s5, s7
+; GFX10-NEXT: s_cmp_gt_i32 s6, s4
+; GFX10-NEXT: s_mov_b32 s12, 0xffff8000
+; GFX10-NEXT: s_cselect_b32 s9, s6, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s12, s12, s12
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s10, s10
+; GFX10-NEXT: s_lshr_b32 s10, s8, 16
+; GFX10-NEXT: s_lshr_b32 s11, s9, 16
+; GFX10-NEXT: s_sub_i32 s8, s8, s9
+; GFX10-NEXT: s_sub_i32 s10, s10, s11
+; GFX10-NEXT: s_cmp_lt_i32 s5, s7
+; GFX10-NEXT: s_sext_i32_i16 s14, s2
+; GFX10-NEXT: s_cselect_b32 s5, s5, s7
+; GFX10-NEXT: s_cmp_lt_i32 s6, s4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s6, s6, s4
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s8, s10
+; GFX10-NEXT: s_lshr_b32 s8, s5, 16
+; GFX10-NEXT: s_lshr_b32 s10, s12, 16
+; GFX10-NEXT: s_sext_i32_i16 s13, s6
+; GFX10-NEXT: s_sub_i32 s5, s5, s12
+; GFX10-NEXT: s_sub_i32 s8, s8, s10
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_cmp_gt_i32 s13, s14
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s8
+; GFX10-NEXT: s_cselect_b32 s13, s13, s14
+; GFX10-NEXT: s_cmp_gt_i32 s6, s2
+; GFX10-NEXT: s_sext_i32_i16 s8, s5
+; GFX10-NEXT: s_cselect_b32 s2, s6, s2
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s13, s2
+; GFX10-NEXT: s_sext_i32_i16 s6, s2
+; GFX10-NEXT: s_ashr_i32 s2, s2, 16
+; GFX10-NEXT: s_cmp_lt_i32 s6, s8
+; GFX10-NEXT: s_cselect_b32 s6, s6, s8
+; GFX10-NEXT: s_cmp_lt_i32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s5
+; GFX10-NEXT: s_lshr_b32 s5, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s6, s2
+; GFX10-NEXT: s_lshr_b32 s6, s2, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s2
+; GFX10-NEXT: s_sub_i32 s2, s5, s6
+; GFX10-NEXT: s_sext_i32_i16 s5, s1
+; GFX10-NEXT: s_ashr_i32 s6, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s5, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX10-NEXT: s_cselect_b32 s8, s5, s7
+; GFX10-NEXT: s_cmp_gt_i32 s6, s4
+; GFX10-NEXT: s_cselect_b32 s13, s6, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, s8, s13
+; GFX10-NEXT: s_lshr_b32 s13, s8, 16
+; GFX10-NEXT: s_sub_i32 s8, s8, s9
+; GFX10-NEXT: s_sub_i32 s9, s13, s11
+; GFX10-NEXT: s_cmp_lt_i32 s5, s7
+; GFX10-NEXT: s_cselect_b32 s5, s5, s7
+; GFX10-NEXT: s_cmp_lt_i32 s6, s4
+; GFX10-NEXT: s_cselect_b32 s4, s6, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s5, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s8, s9
+; GFX10-NEXT: s_lshr_b32 s6, s4, 16
+; GFX10-NEXT: s_sext_i32_i16 s7, s5
+; GFX10-NEXT: s_sext_i32_i16 s8, s3
+; GFX10-NEXT: s_sub_i32 s4, s4, s12
+; GFX10-NEXT: s_sub_i32 s6, s6, s10
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_cmp_gt_i32 s7, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s6
+; GFX10-NEXT: s_cselect_b32 s7, s7, s8
+; GFX10-NEXT: s_cmp_gt_i32 s5, s3
+; GFX10-NEXT: s_sext_i32_i16 s6, s4
+; GFX10-NEXT: s_cselect_b32 s3, s5, s3
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s7, s3
+; GFX10-NEXT: s_sext_i32_i16 s5, s3
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_cmp_lt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_cmp_lt_i32 s3, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s4
+; GFX10-NEXT: s_lshr_b32 s4, s1, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s5, s3
+; GFX10-NEXT: s_lshr_b32 s5, s3, 16
+; GFX10-NEXT: s_sub_i32 s1, s1, s3
+; GFX10-NEXT: s_sub_i32 s3, s4, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x i32>
+ ret <2 x i32> %cast
+}
+
+; FIXME
+; define <5 x i16> @v_ssubsat_v5i16(<5 x i16> %lhs, <5 x i16> %rhs) {
+; %result = call <5 x i16> @llvm.ssub.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+; define amdgpu_ps <5 x i16> @s_ssubsat_v5i16(<5 x i16> inreg %lhs, <5 x i16> inreg %rhs) {
+; %result = call <5 x i16> @llvm.ssub.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+define <3 x float> @v_ssubsat_v6i16(<6 x i16> %lhs, <6 x i16> %rhs) {
+; GFX6-LABEL: v_ssubsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v12, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_subrev_i32_e32 v12, vcc, s4, v12
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v14, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v14, vcc, s5, v14
+; GFX6-NEXT: v_max_i32_e32 v6, v12, v6
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v14
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX6-NEXT: v_max_i32_e32 v7, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v7, vcc, s4, v7
+; GFX6-NEXT: v_min_i32_e32 v12, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v12, vcc, s5, v12
+; GFX6-NEXT: v_max_i32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v12
+; GFX6-NEXT: v_bfrev_b32_e32 v13, -2
+; GFX6-NEXT: v_max_i32_e32 v7, -1, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v8
+; GFX6-NEXT: v_min_i32_e32 v8, -1, v2
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v13
+; GFX6-NEXT: v_subrev_i32_e32 v8, vcc, s5, v8
+; GFX6-NEXT: v_max_i32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v8
+; GFX6-NEXT: v_max_i32_e32 v7, -1, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v6
+; GFX6-NEXT: v_mov_b32_e32 v15, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v8, -1, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v13
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v8, v15
+; GFX6-NEXT: v_max_i32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v8
+; GFX6-NEXT: v_max_i32_e32 v7, -1, v4
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v6
+; GFX6-NEXT: v_min_i32_e32 v8, -1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v10
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v13
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v8, v15
+; GFX6-NEXT: v_max_i32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v8
+; GFX6-NEXT: v_max_i32_e32 v7, -1, v5
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v6
+; GFX6-NEXT: v_min_i32_e32 v8, -1, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v11
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v13
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v8, v15
+; GFX6-NEXT: v_max_i32_e32 v6, v7, v6
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_min_i32_e32 v6, v6, v8
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v6
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s6, 0xffff
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v10, s6, v0
+; GFX8-NEXT: v_subrev_u16_e32 v10, s4, v10
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v12, s6, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX8-NEXT: v_subrev_u16_e32 v12, s5, v12
+; GFX8-NEXT: v_max_i16_e32 v10, v10, v3
+; GFX8-NEXT: v_min_i16_e32 v10, v10, v12
+; GFX8-NEXT: v_max_i16_e32 v12, s6, v6
+; GFX8-NEXT: v_min_i16_e32 v14, s6, v6
+; GFX8-NEXT: v_subrev_u16_e32 v12, s4, v12
+; GFX8-NEXT: v_max_i16_sdwa v3, v12, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v12, s6, v1
+; GFX8-NEXT: v_subrev_u16_e32 v14, s5, v14
+; GFX8-NEXT: v_min_i16_e32 v3, v3, v14
+; GFX8-NEXT: v_subrev_u16_e32 v12, s4, v12
+; GFX8-NEXT: v_min_i16_e32 v14, s6, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX8-NEXT: v_subrev_u16_e32 v14, s5, v14
+; GFX8-NEXT: v_max_i16_e32 v12, v12, v4
+; GFX8-NEXT: v_min_i16_e32 v12, v12, v14
+; GFX8-NEXT: v_max_i16_e32 v14, s6, v7
+; GFX8-NEXT: v_mov_b32_e32 v9, 0xffff
+; GFX8-NEXT: v_min_i16_e32 v15, v7, v9
+; GFX8-NEXT: v_subrev_u16_e32 v14, s4, v14
+; GFX8-NEXT: v_max_i16_sdwa v4, v14, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_subrev_u16_e32 v15, s5, v15
+; GFX8-NEXT: v_mov_b32_e32 v11, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v14, v2, v9
+; GFX8-NEXT: v_sub_u16_e32 v14, v14, v11
+; GFX8-NEXT: v_min_i16_e32 v4, v4, v15
+; GFX8-NEXT: v_mov_b32_e32 v13, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v15, v2, v9
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX8-NEXT: v_sub_u16_e32 v15, v15, v13
+; GFX8-NEXT: v_max_i16_e32 v14, v14, v5
+; GFX8-NEXT: v_min_i16_e32 v14, v14, v15
+; GFX8-NEXT: v_max_i16_e32 v15, v8, v9
+; GFX8-NEXT: v_min_i16_e32 v9, v8, v9
+; GFX8-NEXT: v_sub_u16_e32 v11, v15, v11
+; GFX8-NEXT: v_sub_u16_e32 v9, v9, v13
+; GFX8-NEXT: v_max_i16_sdwa v5, v11, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v10
+; GFX8-NEXT: v_sub_u16_sdwa v3, v6, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX8-NEXT: v_min_i16_e32 v5, v5, v9
+; GFX8-NEXT: v_sub_u16_e32 v1, v1, v12
+; GFX8-NEXT: v_sub_u16_sdwa v3, v7, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_sub_u16_e32 v2, v2, v14
+; GFX8-NEXT: v_sub_u16_sdwa v3, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v6, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v6, v6, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v7, v0, s6
+; GFX9-NEXT: v_pk_max_i16 v3, v6, v3
+; GFX9-NEXT: v_pk_sub_i16 v7, v7, s5
+; GFX9-NEXT: v_pk_min_i16 v3, v3, v7
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v3
+; GFX9-NEXT: v_pk_max_i16 v3, v1, s6
+; GFX9-NEXT: v_pk_sub_i16 v3, v3, s4
+; GFX9-NEXT: v_pk_min_i16 v6, v1, s6
+; GFX9-NEXT: v_pk_max_i16 v3, v3, v4
+; GFX9-NEXT: v_pk_sub_i16 v6, v6, s5
+; GFX9-NEXT: v_pk_min_i16 v3, v3, v6
+; GFX9-NEXT: v_pk_sub_i16 v1, v1, v3
+; GFX9-NEXT: v_pk_max_i16 v3, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v3, v3, s4
+; GFX9-NEXT: v_pk_min_i16 v4, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, v4, s5
+; GFX9-NEXT: v_pk_max_i16 v3, v3, v5
+; GFX9-NEXT: v_pk_min_i16 v3, v3, v4
+; GFX9-NEXT: v_pk_sub_i16 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, -1, -1
+; GFX10-NEXT: s_movk_i32 s4, 0x7fff
+; GFX10-NEXT: v_pk_max_i16 v6, v0, s5
+; GFX10-NEXT: v_pk_max_i16 v8, v1, s5
+; GFX10-NEXT: v_pk_max_i16 v9, v2, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX10-NEXT: v_pk_min_i16 v7, v0, s5
+; GFX10-NEXT: v_pk_sub_i16 v6, v6, s4
+; GFX10-NEXT: v_pk_sub_i16 v15, v8, s4
+; GFX10-NEXT: v_pk_sub_i16 v19, v9, s4
+; GFX10-NEXT: v_pk_min_i16 v10, v1, s5
+; GFX10-NEXT: v_pk_min_i16 v11, v2, s5
+; GFX10-NEXT: s_mov_b32 s6, 0xffff8000
+; GFX10-NEXT: v_pk_max_i16 v14, v6, v3
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX10-NEXT: v_pk_max_i16 v4, v15, v4
+; GFX10-NEXT: v_pk_sub_i16 v7, v7, s6
+; GFX10-NEXT: v_pk_sub_i16 v6, v10, s6
+; GFX10-NEXT: v_pk_sub_i16 v8, v11, s6
+; GFX10-NEXT: v_pk_max_i16 v5, v19, v5
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_min_i16 v3, v14, v7
+; GFX10-NEXT: v_pk_min_i16 v4, v4, v6
+; GFX10-NEXT: v_pk_min_i16 v5, v5, v8
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v3
+; GFX10-NEXT: v_pk_sub_i16 v1, v1, v4
+; GFX10-NEXT: v_pk_sub_i16 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <6 x i16> @llvm.ssub.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x float>
+ ret <3 x float> %cast
+}
+
+define amdgpu_ps <3 x i32> @s_ssubsat_v6i16(<6 x i16> inreg %lhs, <6 x i16> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s12, -2
+; GFX6-NEXT: s_cselect_b32 s14, s0, -1
+; GFX6-NEXT: s_sub_i32 s14, s14, s12
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s13, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s15, s0, -1
+; GFX6-NEXT: s_sub_i32 s15, s15, s13
+; GFX6-NEXT: s_cmp_gt_i32 s14, s6
+; GFX6-NEXT: s_cselect_b32 s6, s14, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s15
+; GFX6-NEXT: s_cselect_b32 s6, s6, s15
+; GFX6-NEXT: s_sub_i32 s0, s0, s6
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s6, s7, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s7, s1, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s12
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s14, s1, -1
+; GFX6-NEXT: s_sub_i32 s14, s14, s13
+; GFX6-NEXT: s_cmp_gt_i32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s14
+; GFX6-NEXT: s_cselect_b32 s6, s6, s14
+; GFX6-NEXT: s_sub_i32 s1, s1, s6
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s6, s8, 16
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s7, s2, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s12
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s8, s2, -1
+; GFX6-NEXT: s_sub_i32 s8, s8, s13
+; GFX6-NEXT: s_cmp_gt_i32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s8
+; GFX6-NEXT: s_cselect_b32 s6, s6, s8
+; GFX6-NEXT: s_sub_i32 s2, s2, s6
+; GFX6-NEXT: s_ashr_i32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s6, s9, 16
+; GFX6-NEXT: s_cmp_gt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s7, s3, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s12
+; GFX6-NEXT: s_cmp_lt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s8, s3, -1
+; GFX6-NEXT: s_sub_i32 s8, s8, s13
+; GFX6-NEXT: s_cmp_gt_i32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s8
+; GFX6-NEXT: s_cselect_b32 s6, s6, s8
+; GFX6-NEXT: s_sub_i32 s3, s3, s6
+; GFX6-NEXT: s_ashr_i32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s6, s10, 16
+; GFX6-NEXT: s_cmp_gt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s7, s4, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s12
+; GFX6-NEXT: s_cmp_lt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s8, s4, -1
+; GFX6-NEXT: s_sub_i32 s8, s8, s13
+; GFX6-NEXT: s_cmp_gt_i32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s8
+; GFX6-NEXT: s_cselect_b32 s6, s6, s8
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_ashr_i32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s6, s11, 16
+; GFX6-NEXT: s_cmp_gt_i32 s5, -1
+; GFX6-NEXT: s_cselect_b32 s7, s5, -1
+; GFX6-NEXT: s_sub_i32 s7, s7, s12
+; GFX6-NEXT: s_cmp_lt_i32 s5, -1
+; GFX6-NEXT: s_cselect_b32 s8, s5, -1
+; GFX6-NEXT: s_sub_i32 s8, s8, s13
+; GFX6-NEXT: s_cmp_gt_i32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_cmp_lt_i32 s6, s8
+; GFX6-NEXT: s_cselect_b32 s6, s6, s8
+; GFX6-NEXT: s_sub_i32 s5, s5, s6
+; GFX6-NEXT: s_mov_b32 s6, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s6
+; GFX6-NEXT: s_and_b32 s0, s0, s6
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s6
+; GFX6-NEXT: s_and_b32 s2, s3, s6
+; GFX6-NEXT: s_ashr_i32 s5, s5, 16
+; GFX6-NEXT: s_and_b32 s3, s5, s6
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s6
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s9, s3, 16
+; GFX8-NEXT: s_lshr_b32 s10, s4, 16
+; GFX8-NEXT: s_lshr_b32 s11, s5, 16
+; GFX8-NEXT: s_lshr_b32 s6, s0, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 16
+; GFX8-NEXT: s_lshr_b32 s8, s2, 16
+; GFX8-NEXT: s_sext_i32_i16 s14, s0
+; GFX8-NEXT: s_sext_i32_i16 s15, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s14, s15
+; GFX8-NEXT: s_movk_i32 s12, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s16, s14, s15
+; GFX8-NEXT: s_sub_i32 s16, s16, s12
+; GFX8-NEXT: s_cmp_lt_i32 s14, s15
+; GFX8-NEXT: s_mov_b32 s13, 0x8000
+; GFX8-NEXT: s_cselect_b32 s14, s14, s15
+; GFX8-NEXT: s_sub_i32 s14, s14, s13
+; GFX8-NEXT: s_sext_i32_i16 s16, s16
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_gt_i32 s16, s3
+; GFX8-NEXT: s_cselect_b32 s3, s16, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_sext_i32_i16 s14, s14
+; GFX8-NEXT: s_cmp_lt_i32 s3, s14
+; GFX8-NEXT: s_cselect_b32 s3, s3, s14
+; GFX8-NEXT: s_sub_i32 s0, s0, s3
+; GFX8-NEXT: s_sext_i32_i16 s3, s6
+; GFX8-NEXT: s_cmp_gt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s14, s3, s15
+; GFX8-NEXT: s_sub_i32 s14, s14, s12
+; GFX8-NEXT: s_cmp_lt_i32 s3, s15
+; GFX8-NEXT: s_cselect_b32 s3, s3, s15
+; GFX8-NEXT: s_sub_i32 s3, s3, s13
+; GFX8-NEXT: s_sext_i32_i16 s14, s14
+; GFX8-NEXT: s_sext_i32_i16 s9, s9
+; GFX8-NEXT: s_cmp_gt_i32 s14, s9
+; GFX8-NEXT: s_cselect_b32 s9, s14, s9
+; GFX8-NEXT: s_sext_i32_i16 s9, s9
+; GFX8-NEXT: s_sext_i32_i16 s3, s3
+; GFX8-NEXT: s_cmp_lt_i32 s9, s3
+; GFX8-NEXT: s_cselect_b32 s3, s9, s3
+; GFX8-NEXT: s_sub_i32 s3, s6, s3
+; GFX8-NEXT: s_sext_i32_i16 s6, s1
+; GFX8-NEXT: s_cmp_gt_i32 s6, s15
+; GFX8-NEXT: s_cselect_b32 s9, s6, s15
+; GFX8-NEXT: s_sub_i32 s9, s9, s12
+; GFX8-NEXT: s_cmp_lt_i32 s6, s15
+; GFX8-NEXT: s_cselect_b32 s6, s6, s15
+; GFX8-NEXT: s_sub_i32 s6, s6, s13
+; GFX8-NEXT: s_sext_i32_i16 s9, s9
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_gt_i32 s9, s4
+; GFX8-NEXT: s_cselect_b32 s4, s9, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_lt_i32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_sub_i32 s1, s1, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s7
+; GFX8-NEXT: s_cmp_gt_i32 s4, s15
+; GFX8-NEXT: s_cselect_b32 s6, s4, s15
+; GFX8-NEXT: s_sub_i32 s6, s6, s12
+; GFX8-NEXT: s_cmp_lt_i32 s4, s15
+; GFX8-NEXT: s_cselect_b32 s4, s4, s15
+; GFX8-NEXT: s_sub_i32 s4, s4, s13
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s9, s10
+; GFX8-NEXT: s_cmp_gt_i32 s6, s9
+; GFX8-NEXT: s_cselect_b32 s6, s6, s9
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_lt_i32 s6, s4
+; GFX8-NEXT: s_cselect_b32 s4, s6, s4
+; GFX8-NEXT: s_sub_i32 s4, s7, s4
+; GFX8-NEXT: s_sext_i32_i16 s6, s2
+; GFX8-NEXT: s_cmp_gt_i32 s6, s15
+; GFX8-NEXT: s_cselect_b32 s7, s6, s15
+; GFX8-NEXT: s_sub_i32 s7, s7, s12
+; GFX8-NEXT: s_cmp_lt_i32 s6, s15
+; GFX8-NEXT: s_cselect_b32 s6, s6, s15
+; GFX8-NEXT: s_sub_i32 s6, s6, s13
+; GFX8-NEXT: s_sext_i32_i16 s7, s7
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_gt_i32 s7, s5
+; GFX8-NEXT: s_cselect_b32 s5, s7, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_lt_i32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_sub_i32 s2, s2, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s8
+; GFX8-NEXT: s_cmp_gt_i32 s5, s15
+; GFX8-NEXT: s_cselect_b32 s6, s5, s15
+; GFX8-NEXT: s_sub_i32 s6, s6, s12
+; GFX8-NEXT: s_cmp_lt_i32 s5, s15
+; GFX8-NEXT: s_cselect_b32 s5, s5, s15
+; GFX8-NEXT: s_sub_i32 s5, s5, s13
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s7, s11
+; GFX8-NEXT: s_cmp_gt_i32 s6, s7
+; GFX8-NEXT: s_cselect_b32 s6, s6, s7
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s6, s5
+; GFX8-NEXT: s_cselect_b32 s5, s6, s5
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s3
+; GFX8-NEXT: s_bfe_u32 s3, s4, 0x100000
+; GFX8-NEXT: s_sub_i32 s5, s8, s5
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s3
+; GFX8-NEXT: s_bfe_u32 s3, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, -1, -1
+; GFX9-NEXT: s_sext_i32_i16 s11, s8
+; GFX9-NEXT: s_sext_i32_i16 s9, s0
+; GFX9-NEXT: s_ashr_i32 s10, s0, 16
+; GFX9-NEXT: s_ashr_i32 s8, s8, 16
+; GFX9-NEXT: s_cmp_gt_i32 s9, s11
+; GFX9-NEXT: s_cselect_b32 s12, s9, s11
+; GFX9-NEXT: s_cmp_gt_i32 s10, s8
+; GFX9-NEXT: s_movk_i32 s6, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s13, s10, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX9-NEXT: s_lshr_b32 s13, s12, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_sub_i32 s12, s12, s6
+; GFX9-NEXT: s_sub_i32 s13, s13, s14
+; GFX9-NEXT: s_cmp_lt_i32 s9, s11
+; GFX9-NEXT: s_cselect_b32 s9, s9, s11
+; GFX9-NEXT: s_cmp_lt_i32 s10, s8
+; GFX9-NEXT: s_mov_b32 s7, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s10, s10, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s7
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s13
+; GFX9-NEXT: s_lshr_b32 s10, s9, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
+; GFX9-NEXT: s_sub_i32 s9, s9, s7
+; GFX9-NEXT: s_sub_i32 s10, s10, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_sext_i32_i16 s10, s12
+; GFX9-NEXT: s_sext_i32_i16 s15, s3
+; GFX9-NEXT: s_ashr_i32 s12, s12, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s10, s15
+; GFX9-NEXT: s_cselect_b32 s10, s10, s15
+; GFX9-NEXT: s_cmp_gt_i32 s12, s3
+; GFX9-NEXT: s_cselect_b32 s3, s12, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s10, s3
+; GFX9-NEXT: s_sext_i32_i16 s10, s3
+; GFX9-NEXT: s_sext_i32_i16 s12, s9
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_ashr_i32 s9, s9, 16
+; GFX9-NEXT: s_cmp_lt_i32 s10, s12
+; GFX9-NEXT: s_cselect_b32 s10, s10, s12
+; GFX9-NEXT: s_cmp_lt_i32 s3, s9
+; GFX9-NEXT: s_cselect_b32 s3, s3, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s10, s3
+; GFX9-NEXT: s_lshr_b32 s9, s0, 16
+; GFX9-NEXT: s_lshr_b32 s10, s3, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s3
+; GFX9-NEXT: s_sub_i32 s3, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3
+; GFX9-NEXT: s_sext_i32_i16 s3, s1
+; GFX9-NEXT: s_ashr_i32 s9, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s10, s3, s11
+; GFX9-NEXT: s_cmp_gt_i32 s9, s8
+; GFX9-NEXT: s_cselect_b32 s12, s9, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX9-NEXT: s_lshr_b32 s12, s10, 16
+; GFX9-NEXT: s_sub_i32 s10, s10, s6
+; GFX9-NEXT: s_sub_i32 s12, s12, s14
+; GFX9-NEXT: s_cmp_lt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s3, s3, s11
+; GFX9-NEXT: s_cmp_lt_i32 s9, s8
+; GFX9-NEXT: s_cselect_b32 s9, s9, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX9-NEXT: s_lshr_b32 s9, s3, 16
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX9-NEXT: s_sub_i32 s3, s3, s7
+; GFX9-NEXT: s_sub_i32 s9, s9, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX9-NEXT: s_sext_i32_i16 s9, s10
+; GFX9-NEXT: s_sext_i32_i16 s12, s4
+; GFX9-NEXT: s_ashr_i32 s10, s10, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_gt_i32 s9, s12
+; GFX9-NEXT: s_cselect_b32 s9, s9, s12
+; GFX9-NEXT: s_cmp_gt_i32 s10, s4
+; GFX9-NEXT: s_cselect_b32 s4, s10, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s9, s4
+; GFX9-NEXT: s_sext_i32_i16 s9, s4
+; GFX9-NEXT: s_sext_i32_i16 s10, s3
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_lt_i32 s9, s10
+; GFX9-NEXT: s_cselect_b32 s9, s9, s10
+; GFX9-NEXT: s_cmp_lt_i32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s4, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s9, s3
+; GFX9-NEXT: s_lshr_b32 s4, s1, 16
+; GFX9-NEXT: s_lshr_b32 s9, s3, 16
+; GFX9-NEXT: s_sub_i32 s1, s1, s3
+; GFX9-NEXT: s_sub_i32 s3, s4, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX9-NEXT: s_sext_i32_i16 s3, s2
+; GFX9-NEXT: s_ashr_i32 s4, s2, 16
+; GFX9-NEXT: s_cmp_gt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s9, s3, s11
+; GFX9-NEXT: s_cmp_gt_i32 s4, s8
+; GFX9-NEXT: s_cselect_b32 s10, s4, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_lshr_b32 s10, s9, 16
+; GFX9-NEXT: s_sub_i32 s6, s9, s6
+; GFX9-NEXT: s_sub_i32 s9, s10, s14
+; GFX9-NEXT: s_cmp_lt_i32 s3, s11
+; GFX9-NEXT: s_cselect_b32 s3, s3, s11
+; GFX9-NEXT: s_cmp_lt_i32 s4, s8
+; GFX9-NEXT: s_cselect_b32 s4, s4, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_lshr_b32 s4, s3, 16
+; GFX9-NEXT: s_sub_i32 s3, s3, s7
+; GFX9-NEXT: s_sub_i32 s4, s4, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s6
+; GFX9-NEXT: s_sext_i32_i16 s7, s5
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_ashr_i32 s5, s5, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s7
+; GFX9-NEXT: s_cselect_b32 s4, s4, s7
+; GFX9-NEXT: s_cmp_gt_i32 s6, s5
+; GFX9-NEXT: s_cselect_b32 s5, s6, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s4
+; GFX9-NEXT: s_sext_i32_i16 s6, s3
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s3, s3, 16
+; GFX9-NEXT: s_cmp_lt_i32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_cmp_lt_i32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s4, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s5, s3
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: s_sub_i32 s3, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX10-NEXT: s_sext_i32_i16 s7, s0
+; GFX10-NEXT: s_sext_i32_i16 s9, s6
+; GFX10-NEXT: s_ashr_i32 s8, s0, 16
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_cmp_gt_i32 s7, s9
+; GFX10-NEXT: s_movk_i32 s12, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s10, s7, s9
+; GFX10-NEXT: s_cmp_gt_i32 s8, s6
+; GFX10-NEXT: s_mov_b32 s14, 0xffff8000
+; GFX10-NEXT: s_cselect_b32 s11, s8, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s14, s14, s14
+; GFX10-NEXT: s_pack_ll_b32_b16 s10, s10, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s11, s12, s12
+; GFX10-NEXT: s_lshr_b32 s12, s10, 16
+; GFX10-NEXT: s_lshr_b32 s13, s11, 16
+; GFX10-NEXT: s_sub_i32 s10, s10, s11
+; GFX10-NEXT: s_sub_i32 s12, s12, s13
+; GFX10-NEXT: s_cmp_lt_i32 s7, s9
+; GFX10-NEXT: s_sext_i32_i16 s16, s3
+; GFX10-NEXT: s_cselect_b32 s7, s7, s9
+; GFX10-NEXT: s_cmp_lt_i32 s8, s6
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s8, s8, s6
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, s10, s12
+; GFX10-NEXT: s_lshr_b32 s10, s7, 16
+; GFX10-NEXT: s_lshr_b32 s12, s14, 16
+; GFX10-NEXT: s_sext_i32_i16 s15, s8
+; GFX10-NEXT: s_sub_i32 s7, s7, s14
+; GFX10-NEXT: s_sub_i32 s10, s10, s12
+; GFX10-NEXT: s_ashr_i32 s8, s8, 16
+; GFX10-NEXT: s_cmp_gt_i32 s15, s16
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s7, s10
+; GFX10-NEXT: s_cselect_b32 s15, s15, s16
+; GFX10-NEXT: s_cmp_gt_i32 s8, s3
+; GFX10-NEXT: s_sext_i32_i16 s10, s7
+; GFX10-NEXT: s_cselect_b32 s3, s8, s3
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s15, s3
+; GFX10-NEXT: s_sext_i32_i16 s16, s4
+; GFX10-NEXT: s_sext_i32_i16 s8, s3
+; GFX10-NEXT: s_ashr_i32 s3, s3, 16
+; GFX10-NEXT: s_cmp_lt_i32 s8, s10
+; GFX10-NEXT: s_cselect_b32 s8, s8, s10
+; GFX10-NEXT: s_cmp_lt_i32 s3, s7
+; GFX10-NEXT: s_cselect_b32 s3, s3, s7
+; GFX10-NEXT: s_lshr_b32 s7, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s8, s3
+; GFX10-NEXT: s_lshr_b32 s8, s3, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s3
+; GFX10-NEXT: s_sub_i32 s3, s7, s8
+; GFX10-NEXT: s_sext_i32_i16 s7, s1
+; GFX10-NEXT: s_ashr_i32 s8, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s7, s9
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s3
+; GFX10-NEXT: s_cselect_b32 s10, s7, s9
+; GFX10-NEXT: s_cmp_gt_i32 s8, s6
+; GFX10-NEXT: s_cselect_b32 s15, s8, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s10, s10, s15
+; GFX10-NEXT: s_lshr_b32 s15, s10, 16
+; GFX10-NEXT: s_sub_i32 s10, s10, s11
+; GFX10-NEXT: s_sub_i32 s15, s15, s13
+; GFX10-NEXT: s_cmp_lt_i32 s7, s9
+; GFX10-NEXT: s_cselect_b32 s7, s7, s9
+; GFX10-NEXT: s_cmp_lt_i32 s8, s6
+; GFX10-NEXT: s_cselect_b32 s8, s8, s6
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s7, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, s10, s15
+; GFX10-NEXT: s_lshr_b32 s10, s7, 16
+; GFX10-NEXT: s_sext_i32_i16 s15, s8
+; GFX10-NEXT: s_sub_i32 s7, s7, s14
+; GFX10-NEXT: s_sub_i32 s10, s10, s12
+; GFX10-NEXT: s_ashr_i32 s8, s8, 16
+; GFX10-NEXT: s_cmp_gt_i32 s15, s16
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s7, s10
+; GFX10-NEXT: s_cselect_b32 s15, s15, s16
+; GFX10-NEXT: s_cmp_gt_i32 s8, s4
+; GFX10-NEXT: s_sext_i32_i16 s10, s7
+; GFX10-NEXT: s_cselect_b32 s4, s8, s4
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s15, s4
+; GFX10-NEXT: s_sext_i32_i16 s8, s4
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_lt_i32 s8, s10
+; GFX10-NEXT: s_cselect_b32 s8, s8, s10
+; GFX10-NEXT: s_cmp_lt_i32 s4, s7
+; GFX10-NEXT: s_cselect_b32 s4, s4, s7
+; GFX10-NEXT: s_lshr_b32 s7, s1, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s8, s4
+; GFX10-NEXT: s_lshr_b32 s8, s4, 16
+; GFX10-NEXT: s_sub_i32 s1, s1, s4
+; GFX10-NEXT: s_sub_i32 s4, s7, s8
+; GFX10-NEXT: s_sext_i32_i16 s7, s2
+; GFX10-NEXT: s_ashr_i32 s8, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s7, s9
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX10-NEXT: s_cselect_b32 s10, s7, s9
+; GFX10-NEXT: s_cmp_gt_i32 s8, s6
+; GFX10-NEXT: s_cselect_b32 s15, s8, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s10, s10, s15
+; GFX10-NEXT: s_lshr_b32 s15, s10, 16
+; GFX10-NEXT: s_sub_i32 s10, s10, s11
+; GFX10-NEXT: s_sub_i32 s11, s15, s13
+; GFX10-NEXT: s_cmp_lt_i32 s7, s9
+; GFX10-NEXT: s_cselect_b32 s7, s7, s9
+; GFX10-NEXT: s_cmp_lt_i32 s8, s6
+; GFX10-NEXT: s_cselect_b32 s6, s8, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s7, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s10, s11
+; GFX10-NEXT: s_lshr_b32 s8, s6, 16
+; GFX10-NEXT: s_sext_i32_i16 s9, s7
+; GFX10-NEXT: s_sext_i32_i16 s10, s5
+; GFX10-NEXT: s_sub_i32 s6, s6, s14
+; GFX10-NEXT: s_sub_i32 s8, s8, s12
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_cmp_gt_i32 s9, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX10-NEXT: s_cselect_b32 s9, s9, s10
+; GFX10-NEXT: s_cmp_gt_i32 s7, s5
+; GFX10-NEXT: s_sext_i32_i16 s8, s6
+; GFX10-NEXT: s_cselect_b32 s5, s7, s5
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s9, s5
+; GFX10-NEXT: s_sext_i32_i16 s7, s5
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_cmp_lt_i32 s7, s8
+; GFX10-NEXT: s_cselect_b32 s7, s7, s8
+; GFX10-NEXT: s_cmp_lt_i32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s7, s5
+; GFX10-NEXT: s_lshr_b32 s5, s2, 16
+; GFX10-NEXT: s_lshr_b32 s6, s3, 16
+; GFX10-NEXT: s_sub_i32 s2, s2, s3
+; GFX10-NEXT: s_sub_i32 s3, s5, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <6 x i16> @llvm.ssub.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x i32>
+ ret <3 x i32> %cast
+}
+
+define <4 x float> @v_ssubsat_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; GFX6-LABEL: v_ssubsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_brev_b32 s4, -2
+; GFX6-NEXT: v_max_i32_e32 v16, -1, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX6-NEXT: v_subrev_i32_e32 v16, vcc, s4, v16
+; GFX6-NEXT: s_mov_b32 s5, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v18, -1, v0
+; GFX6-NEXT: v_subrev_i32_e32 v18, vcc, s5, v18
+; GFX6-NEXT: v_max_i32_e32 v8, v16, v8
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v18
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX6-NEXT: v_max_i32_e32 v9, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v9, vcc, s4, v9
+; GFX6-NEXT: v_min_i32_e32 v16, -1, v1
+; GFX6-NEXT: v_subrev_i32_e32 v16, vcc, s5, v16
+; GFX6-NEXT: v_max_i32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v16
+; GFX6-NEXT: v_bfrev_b32_e32 v17, -2
+; GFX6-NEXT: v_max_i32_e32 v9, -1, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v10
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v2
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_subrev_i32_e32 v10, vcc, s5, v10
+; GFX6-NEXT: v_max_i32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v10
+; GFX6-NEXT: v_max_i32_e32 v9, -1, v3
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v8
+; GFX6-NEXT: v_mov_b32_e32 v19, 0x80000000
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v11
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v10, v19
+; GFX6-NEXT: v_max_i32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v10
+; GFX6-NEXT: v_max_i32_e32 v9, -1, v4
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v8
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v12
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v10, v19
+; GFX6-NEXT: v_max_i32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v10
+; GFX6-NEXT: v_max_i32_e32 v9, -1, v5
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v8
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v13
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v10, v19
+; GFX6-NEXT: v_max_i32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v10
+; GFX6-NEXT: v_max_i32_e32 v9, -1, v6
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v8
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v14
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v10, v19
+; GFX6-NEXT: v_max_i32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v10
+; GFX6-NEXT: v_max_i32_e32 v9, -1, v7
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GFX6-NEXT: v_min_i32_e32 v10, -1, v7
+; GFX6-NEXT: v_ashrrev_i32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v15
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v17
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v10, v19
+; GFX6-NEXT: v_max_i32_e32 v8, v9, v8
+; GFX6-NEXT: v_min_i32_e32 v8, v8, v10
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v8
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_ashrrev_i32_e32 v4, 16, v4
+; GFX6-NEXT: v_ashrrev_i32_e32 v7, 16, v7
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_and_b32_e32 v4, s4, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_ashrrev_i32_e32 v6, 16, v6
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s6, 0xffff
+; GFX8-NEXT: s_movk_i32 s4, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v13, s6, v0
+; GFX8-NEXT: v_subrev_u16_e32 v13, s4, v13
+; GFX8-NEXT: s_mov_b32 s5, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v15, s6, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX8-NEXT: v_subrev_u16_e32 v15, s5, v15
+; GFX8-NEXT: v_max_i16_e32 v13, v13, v4
+; GFX8-NEXT: v_min_i16_e32 v13, v13, v15
+; GFX8-NEXT: v_max_i16_e32 v15, s6, v8
+; GFX8-NEXT: v_min_i16_e32 v17, s6, v8
+; GFX8-NEXT: v_subrev_u16_e32 v15, s4, v15
+; GFX8-NEXT: v_max_i16_sdwa v4, v15, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v15, s6, v1
+; GFX8-NEXT: v_subrev_u16_e32 v17, s5, v17
+; GFX8-NEXT: v_min_i16_e32 v4, v4, v17
+; GFX8-NEXT: v_subrev_u16_e32 v15, s4, v15
+; GFX8-NEXT: v_min_i16_e32 v17, s6, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX8-NEXT: v_subrev_u16_e32 v17, s5, v17
+; GFX8-NEXT: v_max_i16_e32 v15, v15, v5
+; GFX8-NEXT: v_min_i16_e32 v15, v15, v17
+; GFX8-NEXT: v_max_i16_e32 v17, s6, v9
+; GFX8-NEXT: v_mov_b32_e32 v12, 0xffff
+; GFX8-NEXT: v_min_i16_e32 v18, v9, v12
+; GFX8-NEXT: v_subrev_u16_e32 v17, s4, v17
+; GFX8-NEXT: v_max_i16_sdwa v5, v17, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_subrev_u16_e32 v18, s5, v18
+; GFX8-NEXT: v_mov_b32_e32 v14, 0x7fff
+; GFX8-NEXT: v_max_i16_e32 v17, v2, v12
+; GFX8-NEXT: v_sub_u16_e32 v17, v17, v14
+; GFX8-NEXT: v_min_i16_e32 v5, v5, v18
+; GFX8-NEXT: v_mov_b32_e32 v16, 0x8000
+; GFX8-NEXT: v_min_i16_e32 v18, v2, v12
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX8-NEXT: v_sub_u16_e32 v18, v18, v16
+; GFX8-NEXT: v_max_i16_e32 v17, v17, v6
+; GFX8-NEXT: v_min_i16_e32 v17, v17, v18
+; GFX8-NEXT: v_max_i16_e32 v18, v10, v12
+; GFX8-NEXT: v_min_i16_e32 v19, v10, v12
+; GFX8-NEXT: v_sub_u16_e32 v18, v18, v14
+; GFX8-NEXT: v_max_i16_sdwa v6, v18, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_max_i16_e32 v18, v3, v12
+; GFX8-NEXT: v_sub_u16_e32 v19, v19, v16
+; GFX8-NEXT: v_sub_u16_e32 v18, v18, v14
+; GFX8-NEXT: v_min_i16_e32 v6, v6, v19
+; GFX8-NEXT: v_min_i16_e32 v19, v3, v12
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX8-NEXT: v_sub_u16_e32 v19, v19, v16
+; GFX8-NEXT: v_max_i16_e32 v18, v18, v7
+; GFX8-NEXT: v_min_i16_e32 v18, v18, v19
+; GFX8-NEXT: v_max_i16_e32 v19, v11, v12
+; GFX8-NEXT: v_min_i16_e32 v12, v11, v12
+; GFX8-NEXT: v_sub_u16_e32 v14, v19, v14
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v13
+; GFX8-NEXT: v_sub_u16_sdwa v4, v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_sub_u16_e32 v12, v12, v16
+; GFX8-NEXT: v_max_i16_sdwa v7, v14, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v1, v1, v15
+; GFX8-NEXT: v_sub_u16_sdwa v4, v9, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
+; GFX8-NEXT: v_min_i16_e32 v7, v7, v12
+; GFX8-NEXT: v_sub_u16_e32 v2, v2, v17
+; GFX8-NEXT: v_sub_u16_sdwa v4, v10, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX8-NEXT: v_sub_u16_e32 v3, v3, v18
+; GFX8-NEXT: v_sub_u16_sdwa v4, v11, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_movk_i32 s4, 0x7fff
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX9-NEXT: s_mov_b32 s5, 0xffff8000
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s4
+; GFX9-NEXT: v_pk_max_i16 v8, v0, s6
+; GFX9-NEXT: v_pk_sub_i16 v8, v8, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX9-NEXT: v_pk_min_i16 v9, v0, s6
+; GFX9-NEXT: v_pk_max_i16 v4, v8, v4
+; GFX9-NEXT: v_pk_sub_i16 v9, v9, s5
+; GFX9-NEXT: v_pk_min_i16 v4, v4, v9
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v4
+; GFX9-NEXT: v_pk_max_i16 v4, v1, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, v4, s4
+; GFX9-NEXT: v_pk_min_i16 v8, v1, s6
+; GFX9-NEXT: v_pk_max_i16 v4, v4, v5
+; GFX9-NEXT: v_pk_sub_i16 v8, v8, s5
+; GFX9-NEXT: v_pk_min_i16 v4, v4, v8
+; GFX9-NEXT: v_pk_sub_i16 v1, v1, v4
+; GFX9-NEXT: v_pk_max_i16 v4, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, v4, s4
+; GFX9-NEXT: v_pk_min_i16 v5, v2, s6
+; GFX9-NEXT: v_pk_sub_i16 v5, v5, s5
+; GFX9-NEXT: v_pk_max_i16 v4, v4, v6
+; GFX9-NEXT: v_pk_min_i16 v4, v4, v5
+; GFX9-NEXT: v_pk_sub_i16 v2, v2, v4
+; GFX9-NEXT: v_pk_max_i16 v4, v3, s6
+; GFX9-NEXT: v_pk_sub_i16 v4, v4, s4
+; GFX9-NEXT: v_pk_min_i16 v5, v3, s6
+; GFX9-NEXT: v_pk_sub_i16 v5, v5, s5
+; GFX9-NEXT: v_pk_max_i16 v4, v4, v7
+; GFX9-NEXT: v_pk_min_i16 v4, v4, v5
+; GFX9-NEXT: v_pk_sub_i16 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: s_movk_i32 s5, 0x7fff
+; GFX10-NEXT: v_pk_max_i16 v8, v0, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s5, s5
+; GFX10-NEXT: v_pk_max_i16 v10, v1, s4
+; GFX10-NEXT: v_pk_max_i16 v12, v3, s4
+; GFX10-NEXT: v_pk_min_i16 v9, v0, s4
+; GFX10-NEXT: v_pk_sub_i16 v15, v8, s5
+; GFX10-NEXT: v_pk_max_i16 v8, v2, s4
+; GFX10-NEXT: v_pk_sub_i16 v10, v10, s5
+; GFX10-NEXT: v_pk_sub_i16 v12, v12, s5
+; GFX10-NEXT: v_pk_min_i16 v11, v1, s4
+; GFX10-NEXT: v_pk_min_i16 v13, v2, s4
+; GFX10-NEXT: v_pk_sub_i16 v8, v8, s5
+; GFX10-NEXT: v_pk_min_i16 v14, v3, s4
+; GFX10-NEXT: s_mov_b32 s6, 0xffff8000
+; GFX10-NEXT: v_pk_max_i16 v4, v15, v4
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s6, s6
+; GFX10-NEXT: v_pk_max_i16 v5, v10, v5
+; GFX10-NEXT: v_pk_sub_i16 v11, v11, s6
+; GFX10-NEXT: v_pk_sub_i16 v9, v9, s6
+; GFX10-NEXT: v_pk_max_i16 v15, v8, v6
+; GFX10-NEXT: v_pk_sub_i16 v10, v13, s6
+; GFX10-NEXT: v_pk_sub_i16 v8, v14, s6
+; GFX10-NEXT: v_pk_max_i16 v7, v12, v7
+; GFX10-NEXT: v_pk_min_i16 v19, v4, v9
+; GFX10-NEXT: v_pk_min_i16 v11, v5, v11
+; GFX10-NEXT: v_pk_min_i16 v15, v15, v10
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_min_i16 v6, v7, v8
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v19
+; GFX10-NEXT: v_pk_sub_i16 v1, v1, v11
+; GFX10-NEXT: v_pk_sub_i16 v2, v2, v15
+; GFX10-NEXT: v_pk_sub_i16 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x i32> @s_ssubsat_v8i16(<8 x i16> inreg %lhs, <8 x i16> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s8, s8, 16
+; GFX6-NEXT: s_cmp_gt_i32 s0, -1
+; GFX6-NEXT: s_brev_b32 s16, -2
+; GFX6-NEXT: s_cselect_b32 s18, s0, -1
+; GFX6-NEXT: s_sub_i32 s18, s18, s16
+; GFX6-NEXT: s_cmp_lt_i32 s0, -1
+; GFX6-NEXT: s_mov_b32 s17, 0x80000000
+; GFX6-NEXT: s_cselect_b32 s19, s0, -1
+; GFX6-NEXT: s_sub_i32 s19, s19, s17
+; GFX6-NEXT: s_cmp_gt_i32 s18, s8
+; GFX6-NEXT: s_cselect_b32 s8, s18, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s19
+; GFX6-NEXT: s_cselect_b32 s8, s8, s19
+; GFX6-NEXT: s_sub_i32 s0, s0, s8
+; GFX6-NEXT: s_ashr_i32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s8, s9, 16
+; GFX6-NEXT: s_cmp_gt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s9, s1, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_i32 s1, -1
+; GFX6-NEXT: s_cselect_b32 s18, s1, -1
+; GFX6-NEXT: s_sub_i32 s18, s18, s17
+; GFX6-NEXT: s_cmp_gt_i32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s18
+; GFX6-NEXT: s_cselect_b32 s8, s8, s18
+; GFX6-NEXT: s_sub_i32 s1, s1, s8
+; GFX6-NEXT: s_ashr_i32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s8, s10, 16
+; GFX6-NEXT: s_cmp_gt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s9, s2, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_i32 s2, -1
+; GFX6-NEXT: s_cselect_b32 s10, s2, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s17
+; GFX6-NEXT: s_cmp_gt_i32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s10
+; GFX6-NEXT: s_cselect_b32 s8, s8, s10
+; GFX6-NEXT: s_sub_i32 s2, s2, s8
+; GFX6-NEXT: s_ashr_i32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s8, s11, 16
+; GFX6-NEXT: s_cmp_gt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s9, s3, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_i32 s3, -1
+; GFX6-NEXT: s_cselect_b32 s10, s3, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s17
+; GFX6-NEXT: s_cmp_gt_i32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s10
+; GFX6-NEXT: s_cselect_b32 s8, s8, s10
+; GFX6-NEXT: s_sub_i32 s3, s3, s8
+; GFX6-NEXT: s_ashr_i32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s8, s12, 16
+; GFX6-NEXT: s_cmp_gt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s9, s4, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_i32 s4, -1
+; GFX6-NEXT: s_cselect_b32 s10, s4, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s17
+; GFX6-NEXT: s_cmp_gt_i32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s10
+; GFX6-NEXT: s_cselect_b32 s8, s8, s10
+; GFX6-NEXT: s_sub_i32 s4, s4, s8
+; GFX6-NEXT: s_ashr_i32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s8, s13, 16
+; GFX6-NEXT: s_cmp_gt_i32 s5, -1
+; GFX6-NEXT: s_cselect_b32 s9, s5, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_i32 s5, -1
+; GFX6-NEXT: s_cselect_b32 s10, s5, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s17
+; GFX6-NEXT: s_cmp_gt_i32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s10
+; GFX6-NEXT: s_cselect_b32 s8, s8, s10
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_ashr_i32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_lshl_b32 s8, s14, 16
+; GFX6-NEXT: s_cmp_gt_i32 s6, -1
+; GFX6-NEXT: s_cselect_b32 s9, s6, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_i32 s6, -1
+; GFX6-NEXT: s_cselect_b32 s10, s6, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s17
+; GFX6-NEXT: s_cmp_gt_i32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s10
+; GFX6-NEXT: s_cselect_b32 s8, s8, s10
+; GFX6-NEXT: s_sub_i32 s6, s6, s8
+; GFX6-NEXT: s_ashr_i32 s6, s6, 16
+; GFX6-NEXT: s_lshl_b32 s7, s7, 16
+; GFX6-NEXT: s_lshl_b32 s8, s15, 16
+; GFX6-NEXT: s_cmp_gt_i32 s7, -1
+; GFX6-NEXT: s_cselect_b32 s9, s7, -1
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_i32 s7, -1
+; GFX6-NEXT: s_cselect_b32 s10, s7, -1
+; GFX6-NEXT: s_sub_i32 s10, s10, s17
+; GFX6-NEXT: s_cmp_gt_i32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_cmp_lt_i32 s8, s10
+; GFX6-NEXT: s_cselect_b32 s8, s8, s10
+; GFX6-NEXT: s_sub_i32 s7, s7, s8
+; GFX6-NEXT: s_mov_b32 s8, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s8
+; GFX6-NEXT: s_and_b32 s0, s0, s8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s8
+; GFX6-NEXT: s_and_b32 s2, s3, s8
+; GFX6-NEXT: s_and_b32 s3, s5, s8
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_ashr_i32 s7, s7, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s8
+; GFX6-NEXT: s_and_b32 s4, s7, s8
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: s_and_b32 s3, s6, s8
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_or_b32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s12, s4, 16
+; GFX8-NEXT: s_lshr_b32 s13, s5, 16
+; GFX8-NEXT: s_lshr_b32 s14, s6, 16
+; GFX8-NEXT: s_lshr_b32 s15, s7, 16
+; GFX8-NEXT: s_lshr_b32 s8, s0, 16
+; GFX8-NEXT: s_lshr_b32 s9, s1, 16
+; GFX8-NEXT: s_lshr_b32 s10, s2, 16
+; GFX8-NEXT: s_lshr_b32 s11, s3, 16
+; GFX8-NEXT: s_sext_i32_i16 s18, s0
+; GFX8-NEXT: s_sext_i32_i16 s19, 0xffff
+; GFX8-NEXT: s_cmp_gt_i32 s18, s19
+; GFX8-NEXT: s_movk_i32 s16, 0x7fff
+; GFX8-NEXT: s_cselect_b32 s20, s18, s19
+; GFX8-NEXT: s_sub_i32 s20, s20, s16
+; GFX8-NEXT: s_cmp_lt_i32 s18, s19
+; GFX8-NEXT: s_mov_b32 s17, 0x8000
+; GFX8-NEXT: s_cselect_b32 s18, s18, s19
+; GFX8-NEXT: s_sub_i32 s18, s18, s17
+; GFX8-NEXT: s_sext_i32_i16 s20, s20
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_gt_i32 s20, s4
+; GFX8-NEXT: s_cselect_b32 s4, s20, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_sext_i32_i16 s18, s18
+; GFX8-NEXT: s_cmp_lt_i32 s4, s18
+; GFX8-NEXT: s_cselect_b32 s4, s4, s18
+; GFX8-NEXT: s_sub_i32 s0, s0, s4
+; GFX8-NEXT: s_sext_i32_i16 s4, s8
+; GFX8-NEXT: s_cmp_gt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s18, s4, s19
+; GFX8-NEXT: s_sub_i32 s18, s18, s16
+; GFX8-NEXT: s_cmp_lt_i32 s4, s19
+; GFX8-NEXT: s_cselect_b32 s4, s4, s19
+; GFX8-NEXT: s_sub_i32 s4, s4, s17
+; GFX8-NEXT: s_sext_i32_i16 s18, s18
+; GFX8-NEXT: s_sext_i32_i16 s12, s12
+; GFX8-NEXT: s_cmp_gt_i32 s18, s12
+; GFX8-NEXT: s_cselect_b32 s12, s18, s12
+; GFX8-NEXT: s_sext_i32_i16 s12, s12
+; GFX8-NEXT: s_sext_i32_i16 s4, s4
+; GFX8-NEXT: s_cmp_lt_i32 s12, s4
+; GFX8-NEXT: s_cselect_b32 s4, s12, s4
+; GFX8-NEXT: s_sub_i32 s4, s8, s4
+; GFX8-NEXT: s_sext_i32_i16 s8, s1
+; GFX8-NEXT: s_cmp_gt_i32 s8, s19
+; GFX8-NEXT: s_cselect_b32 s12, s8, s19
+; GFX8-NEXT: s_sub_i32 s12, s12, s16
+; GFX8-NEXT: s_cmp_lt_i32 s8, s19
+; GFX8-NEXT: s_cselect_b32 s8, s8, s19
+; GFX8-NEXT: s_sub_i32 s8, s8, s17
+; GFX8-NEXT: s_sext_i32_i16 s12, s12
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_gt_i32 s12, s5
+; GFX8-NEXT: s_cselect_b32 s5, s12, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_cmp_lt_i32 s5, s8
+; GFX8-NEXT: s_cselect_b32 s5, s5, s8
+; GFX8-NEXT: s_sub_i32 s1, s1, s5
+; GFX8-NEXT: s_sext_i32_i16 s5, s9
+; GFX8-NEXT: s_cmp_gt_i32 s5, s19
+; GFX8-NEXT: s_cselect_b32 s8, s5, s19
+; GFX8-NEXT: s_sub_i32 s8, s8, s16
+; GFX8-NEXT: s_cmp_lt_i32 s5, s19
+; GFX8-NEXT: s_cselect_b32 s5, s5, s19
+; GFX8-NEXT: s_sub_i32 s5, s5, s17
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_sext_i32_i16 s12, s13
+; GFX8-NEXT: s_cmp_gt_i32 s8, s12
+; GFX8-NEXT: s_cselect_b32 s8, s8, s12
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_sext_i32_i16 s5, s5
+; GFX8-NEXT: s_cmp_lt_i32 s8, s5
+; GFX8-NEXT: s_cselect_b32 s5, s8, s5
+; GFX8-NEXT: s_sub_i32 s5, s9, s5
+; GFX8-NEXT: s_sext_i32_i16 s8, s2
+; GFX8-NEXT: s_cmp_gt_i32 s8, s19
+; GFX8-NEXT: s_cselect_b32 s9, s8, s19
+; GFX8-NEXT: s_sub_i32 s9, s9, s16
+; GFX8-NEXT: s_cmp_lt_i32 s8, s19
+; GFX8-NEXT: s_cselect_b32 s8, s8, s19
+; GFX8-NEXT: s_sub_i32 s8, s8, s17
+; GFX8-NEXT: s_sext_i32_i16 s9, s9
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_gt_i32 s9, s6
+; GFX8-NEXT: s_cselect_b32 s6, s9, s6
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_cmp_lt_i32 s6, s8
+; GFX8-NEXT: s_cselect_b32 s6, s6, s8
+; GFX8-NEXT: s_sub_i32 s2, s2, s6
+; GFX8-NEXT: s_sext_i32_i16 s6, s10
+; GFX8-NEXT: s_cmp_gt_i32 s6, s19
+; GFX8-NEXT: s_cselect_b32 s8, s6, s19
+; GFX8-NEXT: s_sub_i32 s8, s8, s16
+; GFX8-NEXT: s_cmp_lt_i32 s6, s19
+; GFX8-NEXT: s_cselect_b32 s6, s6, s19
+; GFX8-NEXT: s_sub_i32 s6, s6, s17
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_sext_i32_i16 s9, s14
+; GFX8-NEXT: s_cmp_gt_i32 s8, s9
+; GFX8-NEXT: s_cselect_b32 s8, s8, s9
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_sext_i32_i16 s6, s6
+; GFX8-NEXT: s_cmp_lt_i32 s8, s6
+; GFX8-NEXT: s_cselect_b32 s6, s8, s6
+; GFX8-NEXT: s_sub_i32 s6, s10, s6
+; GFX8-NEXT: s_sext_i32_i16 s8, s3
+; GFX8-NEXT: s_cmp_gt_i32 s8, s19
+; GFX8-NEXT: s_cselect_b32 s9, s8, s19
+; GFX8-NEXT: s_sub_i32 s9, s9, s16
+; GFX8-NEXT: s_cmp_lt_i32 s8, s19
+; GFX8-NEXT: s_cselect_b32 s8, s8, s19
+; GFX8-NEXT: s_sub_i32 s8, s8, s17
+; GFX8-NEXT: s_sext_i32_i16 s9, s9
+; GFX8-NEXT: s_sext_i32_i16 s7, s7
+; GFX8-NEXT: s_cmp_gt_i32 s9, s7
+; GFX8-NEXT: s_cselect_b32 s7, s9, s7
+; GFX8-NEXT: s_sext_i32_i16 s7, s7
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_cmp_lt_i32 s7, s8
+; GFX8-NEXT: s_cselect_b32 s7, s7, s8
+; GFX8-NEXT: s_sub_i32 s3, s3, s7
+; GFX8-NEXT: s_sext_i32_i16 s7, s11
+; GFX8-NEXT: s_cmp_gt_i32 s7, s19
+; GFX8-NEXT: s_cselect_b32 s8, s7, s19
+; GFX8-NEXT: s_sub_i32 s8, s8, s16
+; GFX8-NEXT: s_cmp_lt_i32 s7, s19
+; GFX8-NEXT: s_cselect_b32 s7, s7, s19
+; GFX8-NEXT: s_sub_i32 s7, s7, s17
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_sext_i32_i16 s9, s15
+; GFX8-NEXT: s_cmp_gt_i32 s8, s9
+; GFX8-NEXT: s_cselect_b32 s8, s8, s9
+; GFX8-NEXT: s_sext_i32_i16 s8, s8
+; GFX8-NEXT: s_sext_i32_i16 s7, s7
+; GFX8-NEXT: s_cmp_lt_i32 s8, s7
+; GFX8-NEXT: s_cselect_b32 s7, s8, s7
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s4, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s4
+; GFX8-NEXT: s_bfe_u32 s4, s6, 0x100000
+; GFX8-NEXT: s_sub_i32 s7, s11, s7
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s4
+; GFX8-NEXT: s_bfe_u32 s4, s7, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, -1, -1
+; GFX9-NEXT: s_sext_i32_i16 s13, s10
+; GFX9-NEXT: s_sext_i32_i16 s11, s0
+; GFX9-NEXT: s_ashr_i32 s12, s0, 16
+; GFX9-NEXT: s_ashr_i32 s10, s10, 16
+; GFX9-NEXT: s_cmp_gt_i32 s11, s13
+; GFX9-NEXT: s_cselect_b32 s14, s11, s13
+; GFX9-NEXT: s_cmp_gt_i32 s12, s10
+; GFX9-NEXT: s_movk_i32 s8, 0x7fff
+; GFX9-NEXT: s_cselect_b32 s15, s12, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s8
+; GFX9-NEXT: s_lshr_b32 s15, s14, 16
+; GFX9-NEXT: s_lshr_b32 s16, s8, 16
+; GFX9-NEXT: s_sub_i32 s14, s14, s8
+; GFX9-NEXT: s_sub_i32 s15, s15, s16
+; GFX9-NEXT: s_cmp_lt_i32 s11, s13
+; GFX9-NEXT: s_cselect_b32 s11, s11, s13
+; GFX9-NEXT: s_cmp_lt_i32 s12, s10
+; GFX9-NEXT: s_mov_b32 s9, 0xffff8000
+; GFX9-NEXT: s_cselect_b32 s12, s12, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s15
+; GFX9-NEXT: s_lshr_b32 s12, s11, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_sub_i32 s11, s11, s9
+; GFX9-NEXT: s_sub_i32 s12, s12, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_sext_i32_i16 s12, s14
+; GFX9-NEXT: s_sext_i32_i16 s17, s4
+; GFX9-NEXT: s_ashr_i32 s14, s14, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_gt_i32 s12, s17
+; GFX9-NEXT: s_cselect_b32 s12, s12, s17
+; GFX9-NEXT: s_cmp_gt_i32 s14, s4
+; GFX9-NEXT: s_cselect_b32 s4, s14, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s12, s4
+; GFX9-NEXT: s_sext_i32_i16 s12, s4
+; GFX9-NEXT: s_sext_i32_i16 s14, s11
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_ashr_i32 s11, s11, 16
+; GFX9-NEXT: s_cmp_lt_i32 s12, s14
+; GFX9-NEXT: s_cselect_b32 s12, s12, s14
+; GFX9-NEXT: s_cmp_lt_i32 s4, s11
+; GFX9-NEXT: s_cselect_b32 s4, s4, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s12, s4
+; GFX9-NEXT: s_lshr_b32 s11, s0, 16
+; GFX9-NEXT: s_lshr_b32 s12, s4, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s4
+; GFX9-NEXT: s_sub_i32 s4, s11, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s1
+; GFX9-NEXT: s_ashr_i32 s11, s1, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s12, s4, s13
+; GFX9-NEXT: s_cmp_gt_i32 s11, s10
+; GFX9-NEXT: s_cselect_b32 s14, s11, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX9-NEXT: s_lshr_b32 s14, s12, 16
+; GFX9-NEXT: s_sub_i32 s12, s12, s8
+; GFX9-NEXT: s_sub_i32 s14, s14, s16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s4, s4, s13
+; GFX9-NEXT: s_cmp_lt_i32 s11, s10
+; GFX9-NEXT: s_cselect_b32 s11, s11, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s11
+; GFX9-NEXT: s_lshr_b32 s11, s4, 16
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX9-NEXT: s_sub_i32 s4, s4, s9
+; GFX9-NEXT: s_sub_i32 s11, s11, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s11
+; GFX9-NEXT: s_sext_i32_i16 s11, s12
+; GFX9-NEXT: s_sext_i32_i16 s14, s5
+; GFX9-NEXT: s_ashr_i32 s12, s12, 16
+; GFX9-NEXT: s_ashr_i32 s5, s5, 16
+; GFX9-NEXT: s_cmp_gt_i32 s11, s14
+; GFX9-NEXT: s_cselect_b32 s11, s11, s14
+; GFX9-NEXT: s_cmp_gt_i32 s12, s5
+; GFX9-NEXT: s_cselect_b32 s5, s12, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s11, s5
+; GFX9-NEXT: s_sext_i32_i16 s11, s5
+; GFX9-NEXT: s_sext_i32_i16 s12, s4
+; GFX9-NEXT: s_ashr_i32 s5, s5, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_lt_i32 s11, s12
+; GFX9-NEXT: s_cselect_b32 s11, s11, s12
+; GFX9-NEXT: s_cmp_lt_i32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s11, s4
+; GFX9-NEXT: s_lshr_b32 s5, s1, 16
+; GFX9-NEXT: s_lshr_b32 s11, s4, 16
+; GFX9-NEXT: s_sub_i32 s1, s1, s4
+; GFX9-NEXT: s_sub_i32 s4, s5, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s2
+; GFX9-NEXT: s_ashr_i32 s5, s2, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s11, s4, s13
+; GFX9-NEXT: s_cmp_gt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s12, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_lshr_b32 s12, s11, 16
+; GFX9-NEXT: s_sub_i32 s11, s11, s8
+; GFX9-NEXT: s_sub_i32 s12, s12, s16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s4, s4, s13
+; GFX9-NEXT: s_cmp_lt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s5, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_sub_i32 s4, s4, s9
+; GFX9-NEXT: s_sub_i32 s5, s5, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s11
+; GFX9-NEXT: s_sext_i32_i16 s12, s6
+; GFX9-NEXT: s_ashr_i32 s11, s11, 16
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s12
+; GFX9-NEXT: s_cselect_b32 s5, s5, s12
+; GFX9-NEXT: s_cmp_gt_i32 s11, s6
+; GFX9-NEXT: s_cselect_b32 s6, s11, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX9-NEXT: s_sext_i32_i16 s6, s5
+; GFX9-NEXT: s_sext_i32_i16 s11, s4
+; GFX9-NEXT: s_ashr_i32 s5, s5, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_lt_i32 s6, s11
+; GFX9-NEXT: s_cselect_b32 s6, s6, s11
+; GFX9-NEXT: s_cmp_lt_i32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s6, s4
+; GFX9-NEXT: s_lshr_b32 s5, s2, 16
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_sub_i32 s4, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX9-NEXT: s_sext_i32_i16 s4, s3
+; GFX9-NEXT: s_ashr_i32 s5, s3, 16
+; GFX9-NEXT: s_cmp_gt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s6, s4, s13
+; GFX9-NEXT: s_cmp_gt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s11, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s11
+; GFX9-NEXT: s_lshr_b32 s11, s6, 16
+; GFX9-NEXT: s_sub_i32 s6, s6, s8
+; GFX9-NEXT: s_sub_i32 s8, s11, s16
+; GFX9-NEXT: s_cmp_lt_i32 s4, s13
+; GFX9-NEXT: s_cselect_b32 s4, s4, s13
+; GFX9-NEXT: s_cmp_lt_i32 s5, s10
+; GFX9-NEXT: s_cselect_b32 s5, s5, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX9-NEXT: s_sub_i32 s4, s4, s9
+; GFX9-NEXT: s_sub_i32 s5, s5, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_sext_i32_i16 s5, s6
+; GFX9-NEXT: s_sext_i32_i16 s8, s7
+; GFX9-NEXT: s_ashr_i32 s6, s6, 16
+; GFX9-NEXT: s_ashr_i32 s7, s7, 16
+; GFX9-NEXT: s_cmp_gt_i32 s5, s8
+; GFX9-NEXT: s_cselect_b32 s5, s5, s8
+; GFX9-NEXT: s_cmp_gt_i32 s6, s7
+; GFX9-NEXT: s_cselect_b32 s6, s6, s7
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX9-NEXT: s_sext_i32_i16 s6, s5
+; GFX9-NEXT: s_sext_i32_i16 s7, s4
+; GFX9-NEXT: s_ashr_i32 s5, s5, 16
+; GFX9-NEXT: s_ashr_i32 s4, s4, 16
+; GFX9-NEXT: s_cmp_lt_i32 s6, s7
+; GFX9-NEXT: s_cselect_b32 s6, s6, s7
+; GFX9-NEXT: s_cmp_lt_i32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s6, s4
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_sub_i32 s3, s3, s4
+; GFX9-NEXT: s_sub_i32 s4, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, -1, -1
+; GFX10-NEXT: s_sext_i32_i16 s9, s0
+; GFX10-NEXT: s_sext_i32_i16 s11, s8
+; GFX10-NEXT: s_ashr_i32 s10, s0, 16
+; GFX10-NEXT: s_ashr_i32 s8, s8, 16
+; GFX10-NEXT: s_cmp_gt_i32 s9, s11
+; GFX10-NEXT: s_movk_i32 s14, 0x7fff
+; GFX10-NEXT: s_cselect_b32 s12, s9, s11
+; GFX10-NEXT: s_cmp_gt_i32 s10, s8
+; GFX10-NEXT: s_mov_b32 s16, 0xffff8000
+; GFX10-NEXT: s_cselect_b32 s13, s10, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s16, s16, s16
+; GFX10-NEXT: s_pack_ll_b32_b16 s12, s12, s13
+; GFX10-NEXT: s_pack_ll_b32_b16 s13, s14, s14
+; GFX10-NEXT: s_lshr_b32 s14, s12, 16
+; GFX10-NEXT: s_lshr_b32 s15, s13, 16
+; GFX10-NEXT: s_sub_i32 s12, s12, s13
+; GFX10-NEXT: s_sub_i32 s14, s14, s15
+; GFX10-NEXT: s_cmp_lt_i32 s9, s11
+; GFX10-NEXT: s_sext_i32_i16 s18, s4
+; GFX10-NEXT: s_cselect_b32 s9, s9, s11
+; GFX10-NEXT: s_cmp_lt_i32 s10, s8
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s10, s10, s8
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s10, s12, s14
+; GFX10-NEXT: s_lshr_b32 s12, s9, 16
+; GFX10-NEXT: s_lshr_b32 s14, s16, 16
+; GFX10-NEXT: s_sext_i32_i16 s17, s10
+; GFX10-NEXT: s_sub_i32 s9, s9, s16
+; GFX10-NEXT: s_sub_i32 s12, s12, s14
+; GFX10-NEXT: s_ashr_i32 s10, s10, 16
+; GFX10-NEXT: s_cmp_gt_i32 s17, s18
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s12
+; GFX10-NEXT: s_cselect_b32 s17, s17, s18
+; GFX10-NEXT: s_cmp_gt_i32 s10, s4
+; GFX10-NEXT: s_sext_i32_i16 s12, s9
+; GFX10-NEXT: s_cselect_b32 s4, s10, s4
+; GFX10-NEXT: s_ashr_i32 s9, s9, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s17, s4
+; GFX10-NEXT: s_sext_i32_i16 s18, s5
+; GFX10-NEXT: s_sext_i32_i16 s10, s4
+; GFX10-NEXT: s_ashr_i32 s4, s4, 16
+; GFX10-NEXT: s_cmp_lt_i32 s10, s12
+; GFX10-NEXT: s_cselect_b32 s10, s10, s12
+; GFX10-NEXT: s_cmp_lt_i32 s4, s9
+; GFX10-NEXT: s_cselect_b32 s4, s4, s9
+; GFX10-NEXT: s_lshr_b32 s9, s0, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s10, s4
+; GFX10-NEXT: s_lshr_b32 s10, s4, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s4
+; GFX10-NEXT: s_sub_i32 s4, s9, s10
+; GFX10-NEXT: s_sext_i32_i16 s9, s1
+; GFX10-NEXT: s_ashr_i32 s10, s1, 16
+; GFX10-NEXT: s_cmp_gt_i32 s9, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4
+; GFX10-NEXT: s_cselect_b32 s12, s9, s11
+; GFX10-NEXT: s_cmp_gt_i32 s10, s8
+; GFX10-NEXT: s_cselect_b32 s17, s10, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s12, s12, s17
+; GFX10-NEXT: s_lshr_b32 s17, s12, 16
+; GFX10-NEXT: s_sub_i32 s12, s12, s13
+; GFX10-NEXT: s_sub_i32 s17, s17, s15
+; GFX10-NEXT: s_cmp_lt_i32 s9, s11
+; GFX10-NEXT: s_cselect_b32 s9, s9, s11
+; GFX10-NEXT: s_cmp_lt_i32 s10, s8
+; GFX10-NEXT: s_cselect_b32 s10, s10, s8
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s10, s12, s17
+; GFX10-NEXT: s_lshr_b32 s12, s9, 16
+; GFX10-NEXT: s_sext_i32_i16 s17, s10
+; GFX10-NEXT: s_sub_i32 s9, s9, s16
+; GFX10-NEXT: s_sub_i32 s12, s12, s14
+; GFX10-NEXT: s_ashr_i32 s10, s10, 16
+; GFX10-NEXT: s_cmp_gt_i32 s17, s18
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s12
+; GFX10-NEXT: s_cselect_b32 s17, s17, s18
+; GFX10-NEXT: s_cmp_gt_i32 s10, s5
+; GFX10-NEXT: s_sext_i32_i16 s12, s9
+; GFX10-NEXT: s_cselect_b32 s5, s10, s5
+; GFX10-NEXT: s_ashr_i32 s9, s9, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s17, s5
+; GFX10-NEXT: s_sext_i32_i16 s18, s6
+; GFX10-NEXT: s_sext_i32_i16 s10, s5
+; GFX10-NEXT: s_ashr_i32 s5, s5, 16
+; GFX10-NEXT: s_cmp_lt_i32 s10, s12
+; GFX10-NEXT: s_cselect_b32 s10, s10, s12
+; GFX10-NEXT: s_cmp_lt_i32 s5, s9
+; GFX10-NEXT: s_cselect_b32 s5, s5, s9
+; GFX10-NEXT: s_lshr_b32 s9, s1, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s10, s5
+; GFX10-NEXT: s_lshr_b32 s10, s5, 16
+; GFX10-NEXT: s_sub_i32 s1, s1, s5
+; GFX10-NEXT: s_sub_i32 s5, s9, s10
+; GFX10-NEXT: s_sext_i32_i16 s9, s2
+; GFX10-NEXT: s_ashr_i32 s10, s2, 16
+; GFX10-NEXT: s_cmp_gt_i32 s9, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5
+; GFX10-NEXT: s_cselect_b32 s12, s9, s11
+; GFX10-NEXT: s_cmp_gt_i32 s10, s8
+; GFX10-NEXT: s_cselect_b32 s17, s10, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s12, s12, s17
+; GFX10-NEXT: s_lshr_b32 s17, s12, 16
+; GFX10-NEXT: s_sub_i32 s12, s12, s13
+; GFX10-NEXT: s_sub_i32 s17, s17, s15
+; GFX10-NEXT: s_cmp_lt_i32 s9, s11
+; GFX10-NEXT: s_cselect_b32 s9, s9, s11
+; GFX10-NEXT: s_cmp_lt_i32 s10, s8
+; GFX10-NEXT: s_cselect_b32 s10, s10, s8
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s10, s12, s17
+; GFX10-NEXT: s_lshr_b32 s12, s9, 16
+; GFX10-NEXT: s_sext_i32_i16 s17, s10
+; GFX10-NEXT: s_sub_i32 s9, s9, s16
+; GFX10-NEXT: s_sub_i32 s12, s12, s14
+; GFX10-NEXT: s_ashr_i32 s10, s10, 16
+; GFX10-NEXT: s_cmp_gt_i32 s17, s18
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s9, s12
+; GFX10-NEXT: s_cselect_b32 s17, s17, s18
+; GFX10-NEXT: s_cmp_gt_i32 s10, s6
+; GFX10-NEXT: s_sext_i32_i16 s12, s9
+; GFX10-NEXT: s_cselect_b32 s6, s10, s6
+; GFX10-NEXT: s_ashr_i32 s9, s9, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX10-NEXT: s_sext_i32_i16 s10, s6
+; GFX10-NEXT: s_ashr_i32 s6, s6, 16
+; GFX10-NEXT: s_cmp_lt_i32 s10, s12
+; GFX10-NEXT: s_cselect_b32 s10, s10, s12
+; GFX10-NEXT: s_cmp_lt_i32 s6, s9
+; GFX10-NEXT: s_cselect_b32 s6, s6, s9
+; GFX10-NEXT: s_lshr_b32 s9, s2, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s10, s6
+; GFX10-NEXT: s_lshr_b32 s10, s6, 16
+; GFX10-NEXT: s_sub_i32 s2, s2, s6
+; GFX10-NEXT: s_sub_i32 s6, s9, s10
+; GFX10-NEXT: s_sext_i32_i16 s9, s3
+; GFX10-NEXT: s_ashr_i32 s10, s3, 16
+; GFX10-NEXT: s_cmp_gt_i32 s9, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s12, s9, s11
+; GFX10-NEXT: s_cmp_gt_i32 s10, s8
+; GFX10-NEXT: s_cselect_b32 s17, s10, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s12, s12, s17
+; GFX10-NEXT: s_lshr_b32 s17, s12, 16
+; GFX10-NEXT: s_sub_i32 s12, s12, s13
+; GFX10-NEXT: s_sub_i32 s13, s17, s15
+; GFX10-NEXT: s_cmp_lt_i32 s9, s11
+; GFX10-NEXT: s_cselect_b32 s9, s9, s11
+; GFX10-NEXT: s_cmp_lt_i32 s10, s8
+; GFX10-NEXT: s_cselect_b32 s8, s10, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, s9, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s9, s12, s13
+; GFX10-NEXT: s_lshr_b32 s10, s8, 16
+; GFX10-NEXT: s_sext_i32_i16 s11, s9
+; GFX10-NEXT: s_sext_i32_i16 s12, s7
+; GFX10-NEXT: s_sub_i32 s8, s8, s16
+; GFX10-NEXT: s_sub_i32 s10, s10, s14
+; GFX10-NEXT: s_ashr_i32 s9, s9, 16
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_cmp_gt_i32 s11, s12
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, s8, s10
+; GFX10-NEXT: s_cselect_b32 s11, s11, s12
+; GFX10-NEXT: s_cmp_gt_i32 s9, s7
+; GFX10-NEXT: s_sext_i32_i16 s10, s8
+; GFX10-NEXT: s_cselect_b32 s7, s9, s7
+; GFX10-NEXT: s_ashr_i32 s8, s8, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s7, s11, s7
+; GFX10-NEXT: s_sext_i32_i16 s9, s7
+; GFX10-NEXT: s_ashr_i32 s7, s7, 16
+; GFX10-NEXT: s_cmp_lt_i32 s9, s10
+; GFX10-NEXT: s_cselect_b32 s9, s9, s10
+; GFX10-NEXT: s_cmp_lt_i32 s7, s8
+; GFX10-NEXT: s_cselect_b32 s4, s7, s8
+; GFX10-NEXT: s_lshr_b32 s5, s3, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s9, s4
+; GFX10-NEXT: s_lshr_b32 s7, s4, 16
+; GFX10-NEXT: s_sub_i32 s3, s3, s4
+; GFX10-NEXT: s_sub_i32 s4, s5, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x i32>
+ ret <4 x i32> %cast
+}
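+
The long SALU sequences above are the min/max form of the lowering: each 16-bit lane is sign-extended, the legal range for the right-hand operand is derived with smax/smin against -1 (the s_cmp_gt_i32/s_cselect_b32 and s_cmp_lt_i32/s_cselect_b32 pairs, with 0x7fff and 0x8000 subtracted off), and the clamped operand is subtracted. A minimal C++ sketch of one i16 lane, written from those checks rather than taken from the in-tree code:

    #include <algorithm>
    #include <cstdint>

    // Min/max expansion of ssub.sat for a single i16 lane (sketch):
    //   lo = smax(a, -1) - INT16_MAX   // any b below lo saturates upward
    //   hi = smin(a, -1) - INT16_MIN   // any b above hi saturates downward
    //   ssub.sat(a, b) = a - clamp(b, lo, hi)
    // Clamping against -1 first keeps both subtractions in range; the asm
    // instead subtracts 0x8000 and re-sign-extends, which is equivalent.
    int16_t ssub_sat_i16(int16_t a, int16_t b) {
      int32_t lo = std::max<int32_t>(a, -1) - INT16_MAX; // s_cmp_gt_i32 + s_sub_i32
      int32_t hi = std::min<int32_t>(a, -1) - INT16_MIN; // s_cmp_lt_i32 + s_sub_i32
      int32_t clamped = std::min(std::max(lo, (int32_t)b), hi);
      return (int16_t)(a - clamped); // the difference always fits in i16
    }

For example, ssub_sat_i16(0, -32768) clamps b to lo = -32767 and returns 32767 instead of wrapping.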
+
+; FIXME: i48 is broken because i48 add is broken
+; define i48 @v_ssubsat_i48(i48 %lhs, i48 %rhs) {
+; %result = call i48 @llvm.ssub.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps i48 @s_ssubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.ssub.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps <2 x float> @ssubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
+; %result = call i48 @llvm.ssub.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+; define amdgpu_ps <2 x float> @ssubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.ssub.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+define i64 @v_ssubsat_i64(i64 %lhs, i64 %rhs) {
+; GFX6-LABEL: v_ssubsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v0, v2
+; GFX6-NEXT: v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v5
+; GFX6-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX6-NEXT: v_add_i32_e64 v2, s[6:7], 0, v0
+; GFX6-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v5
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_add_u32_e64 v2, s[6:7], 0, v0
+; GFX8-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[2:3]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v5
+; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX9-NEXT: v_add_co_u32_e64 v2, s[6:7], 0, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_sub_co_u32_e64 v10, vcc_lo, v0, v2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v11, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[2:3]
+; GFX10-NEXT: v_ashrrev_i32_e32 v6, 31, v11
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[10:11], v[0:1]
+; GFX10-NEXT: v_add_co_u32_e64 v0, s5, v6, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s5, 0x80000000, v6, s5
+; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v10, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v11, v1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
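+
For i64 there is no native min/max, so the expansion goes through subtract-with-borrow instead: the wrapped difference is formed with v_sub/v_subb, signed overflow is detected as the xor of the two v_cmp_lt_i64 results, and on overflow the result is replaced by INT64_MIN or INT64_MAX according to the wrapped value's sign (the v_ashrrev_i32 plus add-with-carry pair materializes that constant). A small C++ sketch of the same logic, not the in-tree implementation:

    #include <cstdint>

    // ssub.sat for i64 via wrapping subtract + overflow check (sketch).
    int64_t ssub_sat_i64(int64_t a, int64_t b) {
      int64_t res = (int64_t)((uint64_t)a - (uint64_t)b); // wrapping subtract
      // Signed overflow iff the result moved in the "wrong" direction:
      // (res < a) must agree with (b > 0). This is the xor of the two
      // v_cmp_lt_i64 results in the checks above.
      if ((res < a) != (b > 0)) {
        // Saturate away from the wrapped result's sign:
        // INT64_MIN + (res >> 63), i.e. INT64_MAX when res is negative.
        return res < 0 ? INT64_MAX : INT64_MIN;
      }
      return res;
    }

For instance, ssub_sat_i64(INT64_MIN, 1) wraps to INT64_MAX, the two comparisons disagree, and the negative-free wrapped sign selects INT64_MIN as the saturated answer.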
+
+define amdgpu_ps i64 @s_ssubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s4, s0, s2
+; GFX6-NEXT: s_cselect_b32 s5, 1, 0
+; GFX6-NEXT: s_and_b32 s5, s5, 1
+; GFX6-NEXT: s_cmp_lg_u32 s5, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: s_subb_u32 s5, s1, s3
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX6-NEXT: s_ashr_i32 s2, s5, 31
+; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX6-NEXT: s_add_u32 s0, s2, 0
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s4, s0, s2
+; GFX8-NEXT: s_cselect_b32 s5, 1, 0
+; GFX8-NEXT: s_and_b32 s5, s5, 1
+; GFX8-NEXT: s_cmp_lg_u32 s5, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: s_subb_u32 s5, s1, s3
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX8-NEXT: s_ashr_i32 s2, s5, 31
+; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX8-NEXT: s_add_u32 s0, s2, 0
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s4, s0, s2
+; GFX9-NEXT: s_cselect_b32 s5, 1, 0
+; GFX9-NEXT: s_and_b32 s5, s5, 1
+; GFX9-NEXT: s_cmp_lg_u32 s5, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: s_subb_u32 s5, s1, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX9-NEXT: s_ashr_i32 s2, s5, 31
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX9-NEXT: s_add_u32 s0, s2, 0
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s4, s0, s2
+; GFX10-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v0, s4
+; GFX10-NEXT: s_and_b32 s5, s5, 1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lg_u32 s5, 0
+; GFX10-NEXT: s_subb_u32 s5, s1, s3
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[4:5], s[0:1]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[2:3], 0
+; GFX10-NEXT: s_ashr_i32 s2, s5, 31
+; GFX10-NEXT: v_mov_b32_e32 v1, s5
+; GFX10-NEXT: s_xor_b32 s3, s1, s0
+; GFX10-NEXT: s_add_u32 s0, s2, 0
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s3
+; GFX10-NEXT: s_and_b32 s1, s1, 1
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_addc_u32 s1, s2, 0x80000000
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s3
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
+
+define amdgpu_ps <2 x float> @ssubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
+; GFX6-LABEL: ssubsat_i64_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v3, v1, vcc
+; GFX6-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[0:1], 0, v[0:1]
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX6-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX6-NEXT: v_add_i32_e64 v4, s[2:3], 0, v0
+; GFX6-NEXT: v_addc_u32_e64 v1, s[2:3], v0, v1, s[2:3]
+; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i64_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v3, v1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[0:1], 0, v[0:1]
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_add_u32_e64 v4, s[2:3], 0, v0
+; GFX8-NEXT: v_addc_u32_e64 v1, s[2:3], v0, v1, s[2:3]
+; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i64_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_sub_co_u32_e32 v2, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[0:1], 0, v[0:1]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[2:3], 0, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[2:3], v0, v1, s[2:3]
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i64_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_sub_co_u32_e64 v2, vcc_lo, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[0:1]
+; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[0:1], v[2:3]
+; GFX10-NEXT: v_add_co_u32_e64 v0, s1, v4, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s1, 0x80000000, v4, s1
+; GFX10-NEXT: s_xor_b32 vcc_lo, vcc_lo, s0
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @ssubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: ssubsat_i64_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_subrev_i32_e32 v2, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[2:3], s[0:1], 0
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX6-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX6-NEXT: v_add_i32_e64 v4, s[0:1], 0, v0
+; GFX6-NEXT: v_addc_u32_e64 v1, s[0:1], v0, v1, s[0:1]
+; GFX6-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i64_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_subrev_u32_e32 v2, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v3, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[2:3], s[0:1], 0
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX8-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX8-NEXT: v_add_u32_e64 v4, s[0:1], 0, v0
+; GFX8-NEXT: v_addc_u32_e64 v1, s[0:1], v0, v1, s[0:1]
+; GFX8-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i64_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_subrev_co_u32_e32 v2, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v3, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], s[0:1], 0
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v3
+; GFX9-NEXT: v_bfrev_b32_e32 v1, 1
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[0:1], 0, v0
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[0:1], v0, v1, s[0:1]
+; GFX9-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v2, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i64_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_sub_co_u32_e64 v2, vcc_lo, v0, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e64 s1, s[0:1], 0
+; GFX10-NEXT: v_ashrrev_i32_e32 v4, 31, v3
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_add_co_u32_e64 v0, s0, v4, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v1, s0, 0x80000000, v4, s0
+; GFX10-NEXT: s_xor_b32 vcc_lo, s1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v2, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.ssub.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define <2 x i64> @v_ssubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; GFX6-LABEL: v_ssubsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v0, v4
+; GFX6-NEXT: v_subb_u32_e32 v9, vcc, v1, v5, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[4:5]
+; GFX6-NEXT: v_ashrrev_i32_e32 v0, 31, v9
+; GFX6-NEXT: s_brev_b32 s8, 1
+; GFX6-NEXT: v_mov_b32_e32 v1, s8
+; GFX6-NEXT: v_add_i32_e64 v4, s[6:7], 0, v0
+; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX6-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v8, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v2, v6
+; GFX6-NEXT: v_subb_u32_e32 v5, vcc, v3, v7, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX6-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[6:7]
+; GFX6-NEXT: v_ashrrev_i32_e32 v2, 31, v5
+; GFX6-NEXT: v_mov_b32_e32 v3, s8
+; GFX6-NEXT: v_add_i32_e64 v6, s[6:7], 0, v2
+; GFX6-NEXT: v_addc_u32_e64 v3, s[6:7], v2, v3, s[6:7]
+; GFX6-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, v0, v4
+; GFX8-NEXT: v_subb_u32_e32 v9, vcc, v1, v5, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[4:5]
+; GFX8-NEXT: v_ashrrev_i32_e32 v0, 31, v9
+; GFX8-NEXT: s_brev_b32 s8, 1
+; GFX8-NEXT: v_mov_b32_e32 v1, s8
+; GFX8-NEXT: v_add_u32_e64 v4, s[6:7], 0, v0
+; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX8-NEXT: v_addc_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v8, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v2, v6
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v3, v7, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX8-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[6:7]
+; GFX8-NEXT: v_ashrrev_i32_e32 v2, 31, v5
+; GFX8-NEXT: v_mov_b32_e32 v3, s8
+; GFX8-NEXT: v_add_u32_e64 v6, s[6:7], 0, v2
+; GFX8-NEXT: v_addc_u32_e64 v3, s[6:7], v2, v3, s[6:7]
+; GFX8-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v0, v4
+; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v1, v5, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[8:9], v[0:1]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[4:5]
+; GFX9-NEXT: v_ashrrev_i32_e32 v0, 31, v9
+; GFX9-NEXT: s_brev_b32 s8, 1
+; GFX9-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-NEXT: v_add_co_u32_e64 v4, s[6:7], 0, v0
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX9-NEXT: v_addc_co_u32_e64 v1, s[6:7], v0, v1, s[6:7]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v8, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v9, v1, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v2, v6
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v7, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[4:5], v[2:3]
+; GFX9-NEXT: v_cmp_lt_i64_e64 s[4:5], 0, v[6:7]
+; GFX9-NEXT: v_ashrrev_i32_e32 v2, 31, v5
+; GFX9-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-NEXT: v_add_co_u32_e64 v6, s[6:7], 0, v2
+; GFX9-NEXT: v_addc_co_u32_e64 v3, s[6:7], v2, v3, s[6:7]
+; GFX9-NEXT: s_xor_b64 vcc, s[4:5], vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v9, v0
+; GFX10-NEXT: v_mov_b32_e32 v10, v1
+; GFX10-NEXT: v_mov_b32_e32 v13, v2
+; GFX10-NEXT: v_mov_b32_e32 v14, v3
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[4:5]
+; GFX10-NEXT: v_sub_co_u32_e64 v19, vcc_lo, v9, v4
+; GFX10-NEXT: s_brev_b32 s8, 1
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v20, vcc_lo, v10, v5, vcc_lo
+; GFX10-NEXT: v_sub_co_u32_e64 v23, vcc_lo, v13, v6
+; GFX10-NEXT: v_cmp_lt_i64_e64 s6, 0, v[6:7]
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v24, vcc_lo, v14, v7, vcc_lo
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_ashrrev_i32_e32 v0, 31, v20
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[19:20], v[9:10]
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v24
+; GFX10-NEXT: v_add_co_u32_e64 v4, s5, v0, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v5, s5, s8, v0, s5
+; GFX10-NEXT: v_cmp_lt_i64_e64 s5, v[23:24], v[13:14]
+; GFX10-NEXT: v_add_co_u32_e64 v2, s7, v1, 0
+; GFX10-NEXT: s_xor_b32 vcc_lo, s4, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e64 v3, s7, s8, v1, s7
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v19, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v20, v5, vcc_lo
+; GFX10-NEXT: s_xor_b32 vcc_lo, s6, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v23, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v24, v3, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
+
+define amdgpu_ps <2 x i64> @s_ssubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s8, s0, s4
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: s_and_b32 s9, s9, 1
+; GFX6-NEXT: s_cmp_lg_u32 s9, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: s_subb_u32 s9, s1, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
+; GFX6-NEXT: s_ashr_i32 s4, s9, 31
+; GFX6-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX6-NEXT: s_add_u32 s0, s4, 0
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_brev_b32 s5, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: s_addc_u32 s1, s4, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: s_sub_u32 s0, s2, s6
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: v_mov_b32_e32 v0, s8
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_subb_u32 s1, s3, s7
+; GFX6-NEXT: v_mov_b32_e32 v3, s9
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
+; GFX6-NEXT: s_ashr_i32 s4, s1, 31
+; GFX6-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: s_add_u32 s0, s4, 0
+; GFX6-NEXT: s_cselect_b32 s2, 1, 0
+; GFX6-NEXT: s_and_b32 s2, s2, 1
+; GFX6-NEXT: s_cmp_lg_u32 s2, 0
+; GFX6-NEXT: s_addc_u32 s3, s4, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v4
+; GFX6-NEXT: v_readfirstlane_b32 s1, v2
+; GFX6-NEXT: v_readfirstlane_b32 s2, v0
+; GFX6-NEXT: v_readfirstlane_b32 s3, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s8, s0, s4
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: s_and_b32 s9, s9, 1
+; GFX8-NEXT: s_cmp_lg_u32 s9, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: s_subb_u32 s9, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
+; GFX8-NEXT: s_ashr_i32 s4, s9, 31
+; GFX8-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX8-NEXT: s_add_u32 s0, s4, 0
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_brev_b32 s5, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: s_addc_u32 s1, s4, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: s_sub_u32 s0, s2, s6
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: v_mov_b32_e32 v0, s8
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: s_subb_u32 s1, s3, s7
+; GFX8-NEXT: v_mov_b32_e32 v3, s9
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
+; GFX8-NEXT: s_ashr_i32 s4, s1, 31
+; GFX8-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: s_add_u32 s0, s4, 0
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_and_b32 s2, s2, 1
+; GFX8-NEXT: s_cmp_lg_u32 s2, 0
+; GFX8-NEXT: s_addc_u32 s3, s4, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v4
+; GFX8-NEXT: v_readfirstlane_b32 s1, v2
+; GFX8-NEXT: v_readfirstlane_b32 s2, v0
+; GFX8-NEXT: v_readfirstlane_b32 s3, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s8, s0, s4
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: s_and_b32 s9, s9, 1
+; GFX9-NEXT: s_cmp_lg_u32 s9, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: s_subb_u32 s9, s1, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[4:5], 0
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[8:9], v[0:1]
+; GFX9-NEXT: s_ashr_i32 s4, s9, 31
+; GFX9-NEXT: s_xor_b64 vcc, s[0:1], vcc
+; GFX9-NEXT: s_add_u32 s0, s4, 0
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_brev_b32 s5, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_addc_u32 s1, s4, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_sub_u32 s0, s2, s6
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v0, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: s_subb_u32 s1, s3, s7
+; GFX9-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[2:3], s[6:7], 0
+; GFX9-NEXT: s_ashr_i32 s4, s1, 31
+; GFX9-NEXT: s_xor_b64 vcc, s[2:3], vcc
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: s_add_u32 s0, s4, 0
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_and_b32 s2, s2, 1
+; GFX9-NEXT: s_cmp_lg_u32 s2, 0
+; GFX9-NEXT: s_addc_u32 s3, s4, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v3, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v4
+; GFX9-NEXT: v_readfirstlane_b32 s1, v2
+; GFX9-NEXT: v_readfirstlane_b32 s2, v0
+; GFX9-NEXT: v_readfirstlane_b32 s3, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s8, s0, s4
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: v_cmp_gt_i64_e64 s4, s[4:5], 0
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: v_mov_b32_e32 v0, s8
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_brev_b32 s10, 1
+; GFX10-NEXT: s_subb_u32 s9, s1, s5
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[8:9], s[0:1]
+; GFX10-NEXT: s_ashr_i32 s1, s9, 31
+; GFX10-NEXT: v_mov_b32_e32 v1, s9
+; GFX10-NEXT: s_xor_b32 s8, s4, s0
+; GFX10-NEXT: s_add_u32 s0, s1, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, s0, s8
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s10
+; GFX10-NEXT: s_sub_u32 s4, s2, s6
+; GFX10-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, s1, s8
+; GFX10-NEXT: s_and_b32 s5, s5, 1
+; GFX10-NEXT: v_mov_b32_e32 v2, s4
+; GFX10-NEXT: s_cmp_lg_u32 s5, 0
+; GFX10-NEXT: s_subb_u32 s5, s3, s7
+; GFX10-NEXT: v_cmp_lt_i64_e64 s2, s[4:5], s[2:3]
+; GFX10-NEXT: v_cmp_gt_i64_e64 s3, s[6:7], 0
+; GFX10-NEXT: s_ashr_i32 s1, s5, 31
+; GFX10-NEXT: v_mov_b32_e32 v3, s5
+; GFX10-NEXT: s_xor_b32 s2, s3, s2
+; GFX10-NEXT: s_add_u32 s0, s1, 0
+; GFX10-NEXT: s_cselect_b32 s3, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s0, s2
+; GFX10-NEXT: s_and_b32 s3, s3, 1
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: s_cmp_lg_u32 s3, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s10
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s1, s2
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
+
+define amdgpu_ps i128 @s_ssubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s8, s0, s4
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: s_and_b32 s9, s9, 1
+; GFX6-NEXT: s_cmp_lg_u32 s9, 0
+; GFX6-NEXT: s_subb_u32 s9, s1, s5
+; GFX6-NEXT: s_cselect_b32 s10, 1, 0
+; GFX6-NEXT: s_and_b32 s10, s10, 1
+; GFX6-NEXT: s_cmp_lg_u32 s10, 0
+; GFX6-NEXT: s_subb_u32 s10, s2, s6
+; GFX6-NEXT: s_cselect_b32 s11, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: s_and_b32 s11, s11, 1
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: s_cmp_lg_u32 s11, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_subb_u32 s11, s3, s7
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[10:11], v[0:1]
+; GFX6-NEXT: v_cmp_gt_u64_e64 s[0:1], s[4:5], 0
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[10:11], v[0:1]
+; GFX6-NEXT: s_movk_i32 s2, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[6:7], 0
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[6:7], 0
+; GFX6-NEXT: s_sub_i32 s6, s2, 64
+; GFX6-NEXT: s_sub_i32 s4, 64, s2
+; GFX6-NEXT: s_cmp_lt_u32 s2, 64
+; GFX6-NEXT: s_cselect_b32 s12, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s2, 0
+; GFX6-NEXT: s_cselect_b32 s13, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX6-NEXT: s_ashr_i64 s[0:1], s[10:11], s2
+; GFX6-NEXT: s_lshr_b64 s[2:3], s[8:9], s2
+; GFX6-NEXT: s_lshl_b64 s[4:5], s[10:11], s4
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX6-NEXT: s_ashr_i32 s4, s11, 31
+; GFX6-NEXT: s_ashr_i64 s[6:7], s[10:11], s6
+; GFX6-NEXT: s_and_b32 s12, s12, 1
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[6:7]
+; GFX6-NEXT: s_and_b32 s6, s13, 1
+; GFX6-NEXT: s_cmp_lg_u32 s6, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX6-NEXT: s_mov_b32 s5, s4
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], s[4:5]
+; GFX6-NEXT: s_add_u32 s2, s2, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: s_and_b32 s4, s4, 1
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: s_addc_u32 s3, s3, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: s_and_b32 s4, s4, 1
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: s_addc_u32 s0, s0, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX6-NEXT: s_and_b32 s4, s4, 1
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_addc_u32 s1, s1, 0x80000000
+; GFX6-NEXT: v_mov_b32_e32 v1, s2
+; GFX6-NEXT: v_mov_b32_e32 v2, s3
+; GFX6-NEXT: v_mov_b32_e32 v3, s8
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v4, s9
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_mov_b32_e32 v4, s10
+; GFX6-NEXT: v_mov_b32_e32 v5, s11
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: v_readfirstlane_b32 s2, v2
+; GFX6-NEXT: v_readfirstlane_b32 s3, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s8, s0, s4
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: s_and_b32 s9, s9, 1
+; GFX8-NEXT: s_cmp_lg_u32 s9, 0
+; GFX8-NEXT: s_subb_u32 s9, s1, s5
+; GFX8-NEXT: s_cselect_b32 s10, 1, 0
+; GFX8-NEXT: s_and_b32 s10, s10, 1
+; GFX8-NEXT: s_cmp_lg_u32 s10, 0
+; GFX8-NEXT: s_subb_u32 s10, s2, s6
+; GFX8-NEXT: s_cselect_b32 s11, 1, 0
+; GFX8-NEXT: s_and_b32 s11, s11, 1
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_cmp_lg_u32 s11, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_subb_u32 s11, s3, s7
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: s_cmp_eq_u64 s[10:11], s[2:3]
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[10:11], v[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cmp_gt_u64_e64 s[0:1], s[4:5], 0
+; GFX8-NEXT: s_cmp_eq_u64 s[6:7], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[6:7], 0
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: s_movk_i32 s2, 0x7f
+; GFX8-NEXT: s_sub_i32 s6, s2, 64
+; GFX8-NEXT: s_sub_i32 s4, 64, s2
+; GFX8-NEXT: s_cmp_lt_u32 s2, 64
+; GFX8-NEXT: s_cselect_b32 s12, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s2, 0
+; GFX8-NEXT: s_cselect_b32 s13, 1, 0
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_ashr_i64 s[0:1], s[10:11], s2
+; GFX8-NEXT: s_lshr_b64 s[2:3], s[8:9], s2
+; GFX8-NEXT: s_lshl_b64 s[4:5], s[10:11], s4
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX8-NEXT: s_ashr_i32 s4, s11, 31
+; GFX8-NEXT: s_ashr_i64 s[6:7], s[10:11], s6
+; GFX8-NEXT: s_and_b32 s12, s12, 1
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[6:7]
+; GFX8-NEXT: s_and_b32 s6, s13, 1
+; GFX8-NEXT: s_cmp_lg_u32 s6, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX8-NEXT: s_mov_b32 s5, s4
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], s[4:5]
+; GFX8-NEXT: s_add_u32 s2, s2, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: s_and_b32 s4, s4, 1
+; GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GFX8-NEXT: s_addc_u32 s3, s3, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: s_and_b32 s4, s4, 1
+; GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GFX8-NEXT: s_addc_u32 s0, s0, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_and_b32 s4, s4, 1
+; GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: s_addc_u32 s1, s1, 0x80000000
+; GFX8-NEXT: v_mov_b32_e32 v1, s2
+; GFX8-NEXT: v_mov_b32_e32 v2, s3
+; GFX8-NEXT: v_mov_b32_e32 v3, s8
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v4, s9
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_mov_b32_e32 v4, s10
+; GFX8-NEXT: v_mov_b32_e32 v5, s11
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: v_readfirstlane_b32 s2, v2
+; GFX8-NEXT: v_readfirstlane_b32 s3, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s8, s0, s4
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: s_and_b32 s9, s9, 1
+; GFX9-NEXT: s_cmp_lg_u32 s9, 0
+; GFX9-NEXT: s_subb_u32 s9, s1, s5
+; GFX9-NEXT: s_cselect_b32 s10, 1, 0
+; GFX9-NEXT: s_and_b32 s10, s10, 1
+; GFX9-NEXT: s_cmp_lg_u32 s10, 0
+; GFX9-NEXT: s_subb_u32 s10, s2, s6
+; GFX9-NEXT: s_cselect_b32 s11, 1, 0
+; GFX9-NEXT: s_and_b32 s11, s11, 1
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: s_cmp_lg_u32 s11, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: s_subb_u32 s11, s3, s7
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[8:9], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: s_cmp_eq_u64 s[10:11], s[2:3]
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[10:11], v[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[0:1], s[4:5], 0
+; GFX9-NEXT: s_cmp_eq_u64 s[6:7], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[6:7], 0
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: s_movk_i32 s2, 0x7f
+; GFX9-NEXT: s_sub_i32 s6, s2, 64
+; GFX9-NEXT: s_sub_i32 s4, 64, s2
+; GFX9-NEXT: s_cmp_lt_u32 s2, 64
+; GFX9-NEXT: s_cselect_b32 s12, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s2, 0
+; GFX9-NEXT: s_cselect_b32 s13, 1, 0
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_ashr_i64 s[0:1], s[10:11], s2
+; GFX9-NEXT: s_lshr_b64 s[2:3], s[8:9], s2
+; GFX9-NEXT: s_lshl_b64 s[4:5], s[10:11], s4
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX9-NEXT: s_ashr_i32 s4, s11, 31
+; GFX9-NEXT: s_ashr_i64 s[6:7], s[10:11], s6
+; GFX9-NEXT: s_and_b32 s12, s12, 1
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[6:7]
+; GFX9-NEXT: s_and_b32 s6, s13, 1
+; GFX9-NEXT: s_cmp_lg_u32 s6, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX9-NEXT: s_mov_b32 s5, s4
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], s[4:5]
+; GFX9-NEXT: s_add_u32 s2, s2, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: s_and_b32 s4, s4, 1
+; GFX9-NEXT: s_cmp_lg_u32 s4, 0
+; GFX9-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: s_and_b32 s4, s4, 1
+; GFX9-NEXT: s_cmp_lg_u32 s4, 0
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX9-NEXT: s_and_b32 s4, s4, 1
+; GFX9-NEXT: s_cmp_lg_u32 s4, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: s_addc_u32 s1, s1, 0x80000000
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_mov_b32_e32 v2, s3
+; GFX9-NEXT: v_mov_b32_e32 v3, s8
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v4, s9
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v4, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_mov_b32_e32 v4, s10
+; GFX9-NEXT: v_mov_b32_e32 v5, s11
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v4, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v5, v3, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: v_readfirstlane_b32 s2, v2
+; GFX9-NEXT: v_readfirstlane_b32 s3, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s8, s0, s4
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: s_movk_i32 s12, 0x7f
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_subb_u32 s9, s1, s5
+; GFX10-NEXT: s_cselect_b32 s10, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[8:9], s[0:1]
+; GFX10-NEXT: s_and_b32 s10, s10, 1
+; GFX10-NEXT: s_cmp_lg_u32 s10, 0
+; GFX10-NEXT: s_subb_u32 s10, s2, s6
+; GFX10-NEXT: s_cselect_b32 s11, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s11, s11, 1
+; GFX10-NEXT: v_mov_b32_e32 v3, s10
+; GFX10-NEXT: s_cmp_lg_u32 s11, 0
+; GFX10-NEXT: s_subb_u32 s11, s3, s7
+; GFX10-NEXT: s_cmp_eq_u64 s[10:11], s[2:3]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s1, s[10:11], s[2:3]
+; GFX10-NEXT: s_cselect_b32 s0, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v4, s11
+; GFX10-NEXT: s_and_b32 s0, 1, s0
+; GFX10-NEXT: s_cmp_eq_u64 s[6:7], 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cmp_gt_u64_e64 s0, s[4:5], 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: s_sub_i32 s13, s12, 64
+; GFX10-NEXT: s_and_b32 s14, 1, s1
+; GFX10-NEXT: s_sub_i32 s2, 64, s12
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
+; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[6:7], 0
+; GFX10-NEXT: s_cmp_lt_u32 s12, 64
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s14
+; GFX10-NEXT: s_cselect_b32 s15, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s12, 0
+; GFX10-NEXT: s_cselect_b32 s16, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s0
+; GFX10-NEXT: s_lshr_b64 s[0:1], s[8:9], s12
+; GFX10-NEXT: s_lshl_b64 s[2:3], s[10:11], s2
+; GFX10-NEXT: s_ashr_i64 s[4:5], s[10:11], s12
+; GFX10-NEXT: s_and_b32 s12, s15, 1
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_ashr_i32 s2, s11, 31
+; GFX10-NEXT: s_ashr_i64 s[6:7], s[10:11], s13
+; GFX10-NEXT: s_cmp_lg_u32 s12, 0
+; GFX10-NEXT: s_mov_b32 s3, s2
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[6:7]
+; GFX10-NEXT: s_and_b32 s6, s16, 1
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, s9
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[8:9], s[0:1]
+; GFX10-NEXT: s_cmp_lg_u32 s12, 0
+; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[4:5], s[2:3]
+; GFX10-NEXT: s_add_u32 s0, s0, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v1, s8
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s2, s2, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s0, vcc_lo
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s1, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v3, s2, vcc_lo
+; GFX10-NEXT: s_addc_u32 s3, s3, 0x80000000
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v4, s3, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.ssub.sat.i128(i128 %lhs, i128 %rhs)
+ ret i128 %result
+}
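+
The i128 expansion follows the same overflow-based pattern, but every piece is built from 32-bit SALU operations: a borrow chain (s_sub_u32/s_subb_u32), a carry-propagated comparison of the wrapped result against the inputs, and a saturation constant formed by adding INT128_MIN to the 127-bit arithmetic shift of the wrapped result. A sketch of just that last step, with the value split into two u64 limbs (hypothetical helper type, not the in-tree code):

    #include <cstdint>

    struct U128 { uint64_t lo, hi; }; // hypothetical two-limb representation

    // Saturation value for an overflowed i128 ssub.sat:
    //   INT128_MIN + (wrapped >> 127)
    // The shift broadcasts the sign bit (0 or all-ones, cf. s_ashr_i32 ..., 31)
    // and the add wraps the top limb, so a negative wrapped result yields
    // INT128_MAX and a non-negative one yields INT128_MIN.
    static U128 ssubsat_i128_clamp(U128 wrapped) {
      uint64_t sign = (uint64_t)((int64_t)wrapped.hi >> 63); // 0 or ~0ull
      U128 sat;
      sat.lo = sign;                         // low limbs of wrapped >> 127
      sat.hi = sign + 0x8000000000000000ull; // + INT128_MIN's top limb (wraps)
      return sat;
    }

This mirrors the s_add_u32/s_addc_u32 chain in the checks above, where 0x80000000 is carried into only the most significant word.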
+
+define amdgpu_ps <4 x float> @ssubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
+; GFX6-LABEL: ssubsat_i128_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v5, vcc, v5, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v6, s2
+; GFX6-NEXT: v_mov_b32_e32 v7, s3
+; GFX6-NEXT: v_subb_u32_e32 v6, vcc, v6, v2, vcc
+; GFX6-NEXT: v_subb_u32_e32 v7, vcc, v7, v3, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
+; GFX6-NEXT: s_movk_i32 s0, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX6-NEXT: v_cmp_gt_i64_e32 vcc, s[2:3], v[6:7]
+; GFX6-NEXT: s_sub_i32 s1, s0, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[6:7]
+; GFX6-NEXT: s_sub_i32 s2, 64, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1]
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX6-NEXT: v_lshl_b64 v[2:3], v[6:7], s2
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_xor_b32_e32 v10, v0, v8
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[4:5], s0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_ashr_i64 v[8:9], v[6:7], s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[6:7], s1
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s4
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, 0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i128_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v5, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v6, s2
+; GFX8-NEXT: v_mov_b32_e32 v7, s3
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v6, v2, vcc
+; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v7, v3, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
+; GFX8-NEXT: s_movk_i32 s0, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX8-NEXT: v_cmp_gt_i64_e32 vcc, s[2:3], v[6:7]
+; GFX8-NEXT: s_sub_i32 s1, s0, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[6:7]
+; GFX8-NEXT: s_sub_i32 s2, 64, s0
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1]
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_xor_b32_e32 v10, v0, v8
+; GFX8-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i128_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v5, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, s2
+; GFX9-NEXT: v_mov_b32_e32 v7, s3
+; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v2, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v3, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[4:5]
+; GFX9-NEXT: s_movk_i32 s0, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX9-NEXT: v_cmp_gt_i64_e32 vcc, s[2:3], v[6:7]
+; GFX9-NEXT: s_sub_i32 s1, s0, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[6:7]
+; GFX9-NEXT: s_sub_i32 s2, 64, s0
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[0:1]
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[2:3]
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[2:3]
+; GFX9-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_xor_b32_e32 v10, v0, v8
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i128_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_sub_co_u32_e64 v4, vcc_lo, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v5, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v6, vcc_lo, s2, v2, vcc_lo
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v7, vcc_lo, s3, v3, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[0:1], v[4:5]
+; GFX10-NEXT: s_movk_i32 s0, 0x7f
+; GFX10-NEXT: s_sub_i32 s1, 64, s0
+; GFX10-NEXT: v_lshrrev_b64 v[15:16], s0, v[4:5]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_i64_e32 vcc_lo, s[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[2:3], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v9, v8, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, 0, v[0:1]
+; GFX10-NEXT: v_lshlrev_b64 v[8:9], s1, v[6:7]
+; GFX10-NEXT: s_sub_i32 s1, s0, 64
+; GFX10-NEXT: s_cmp_lt_u32 s0, 64
+; GFX10-NEXT: v_ashrrev_i64 v[0:1], s0, v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v11, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[2:3]
+; GFX10-NEXT: v_or_b32_e32 v8, v15, v8
+; GFX10-NEXT: v_or_b32_e32 v9, v16, v9
+; GFX10-NEXT: v_ashrrev_i32_e32 v15, 31, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[2:3]
+; GFX10-NEXT: v_ashrrev_i64 v[2:3], s1, v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v12, v11, vcc_lo
+; GFX10-NEXT: s_cselect_b32 vcc_lo, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s0, 0
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v9, vcc_lo
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v8, vcc_lo
+; GFX10-NEXT: s_and_b32 s0, 1, s1
+; GFX10-NEXT: s_and_b32 s1, 1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
+; GFX10-NEXT: v_xor_b32_e32 v9, v11, v10
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v5, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v15, v0, s0
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v9
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v15, v1, s0
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v2, 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v0, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0x80000000, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, v2, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, v3, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v6, v8, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v7, v9, s0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.ssub.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x float> @ssubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: ssubsat_i128_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_subrev_i32_e32 v4, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v5, vcc, v1, v5, vcc
+; GFX6-NEXT: v_mov_b32_e32 v6, s2
+; GFX6-NEXT: v_mov_b32_e32 v7, s3
+; GFX6-NEXT: v_subb_u32_e32 v6, vcc, v2, v6, vcc
+; GFX6-NEXT: v_subb_u32_e32 v7, vcc, v3, v7, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX6-NEXT: v_cmp_gt_u64_e64 s[0:1], s[0:1], 0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[2:3], 0
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX6-NEXT: s_movk_i32 s0, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_sub_i32 s2, 64, s0
+; GFX6-NEXT: s_sub_i32 s1, s0, 64
+; GFX6-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX6-NEXT: s_cmp_lt_u32 s0, 64
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[4:5], s0
+; GFX6-NEXT: v_lshl_b64 v[2:3], v[6:7], s2
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s0, 0
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_ashr_i64 v[8:9], v[6:7], s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[6:7], s1
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s4
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: s_and_b32 s0, 1, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, 0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: ssubsat_i128_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_subrev_u32_e32 v4, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v5, vcc
+; GFX8-NEXT: v_mov_b32_e32 v6, s2
+; GFX8-NEXT: v_mov_b32_e32 v7, s3
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v2, v6, vcc
+; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v3, v7, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT: v_cmp_gt_u64_e64 s[0:1], s[0:1], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s4
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_movk_i32 s0, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_sub_i32 s2, 64, s0
+; GFX8-NEXT: s_sub_i32 s1, s0, 64
+; GFX8-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX8-NEXT: s_cmp_lt_u32 s0, 64
+; GFX8-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s0, 0
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_and_b32 s0, 1, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: ssubsat_i128_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_subrev_co_u32_e32 v4, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v5, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, s2
+; GFX9-NEXT: v_mov_b32_e32 v7, s3
+; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v2, v6, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[0:1], s[0:1], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[2:3], 0
+; GFX9-NEXT: v_ashrrev_i32_e32 v11, 31, v7
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s4
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_movk_i32 s0, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_sub_i32 s2, 64, s0
+; GFX9-NEXT: s_sub_i32 s1, s0, 64
+; GFX9-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX9-NEXT: s_cmp_lt_u32 s0, 64
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], s0, v[4:5]
+; GFX9-NEXT: v_lshlrev_b64 v[2:3], s2, v[6:7]
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s0, 0
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_ashrrev_i64 v[8:9], s0, v[6:7]
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], s1, v[6:7]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_and_b32 s0, 1, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v5, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: v_bfrev_b32_e32 v8, 1
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v4, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v5, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v6, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v7, v3, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: ssubsat_i128_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_mov_b32_e32 v5, v0
+; GFX10-NEXT: v_mov_b32_e32 v6, v1
+; GFX10-NEXT: v_mov_b32_e32 v9, v2
+; GFX10-NEXT: v_mov_b32_e32 v10, v3
+; GFX10-NEXT: s_cmp_eq_u64 s[2:3], 0
+; GFX10-NEXT: v_sub_co_u32_e64 v15, vcc_lo, v5, s0
+; GFX10-NEXT: v_cmp_gt_u64_e64 s0, s[0:1], 0
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v16, vcc_lo, s1, v6, vcc_lo
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v19, vcc_lo, s2, v9, vcc_lo
+; GFX10-NEXT: s_and_b32 s1, 1, s4
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v20, vcc_lo, s3, v10, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[15:16], v[5:6]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, s0
+; GFX10-NEXT: v_cmp_gt_i64_e64 s0, s[2:3], 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_ashrrev_i32_e32 v7, 31, v20
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[19:20], v[9:10]
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[19:20], v[9:10]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, s0
+; GFX10-NEXT: s_movk_i32 s0, 0x7f
+; GFX10-NEXT: s_sub_i32 s2, 64, s0
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v1, v0, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
+; GFX10-NEXT: v_lshrrev_b64 v[0:1], s0, v[15:16]
+; GFX10-NEXT: v_lshlrev_b64 v[2:3], s2, v[19:20]
+; GFX10-NEXT: s_sub_i32 s1, s0, 64
+; GFX10-NEXT: s_cmp_lt_u32 s0, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v11, v9, v8, vcc_lo
+; GFX10-NEXT: s_cselect_b32 vcc_lo, 1, 0
+; GFX10-NEXT: v_ashrrev_i64 v[8:9], s1, v[19:20]
+; GFX10-NEXT: s_cmp_eq_u32 s0, 0
+; GFX10-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX10-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_ashrrev_i64 v[0:1], s0, v[19:20]
+; GFX10-NEXT: s_and_b32 s0, 1, s1
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc_lo
+; GFX10-NEXT: s_and_b32 s1, 1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, s1
+; GFX10-NEXT: v_xor_b32_e32 v9, v11, v10
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v15, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v16, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v7, v0, s0
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v9
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v7, v1, s0
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v2, 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v8, vcc_lo, 0, v0, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v9, vcc_lo, 0x80000000, v1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v15, v2, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v16, v3, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v19, v8, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v20, v9, s0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.ssub.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define <2 x i128> @v_ssubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
+; GFX6-LABEL: v_ssubsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, v0, v8
+; GFX6-NEXT: v_subb_u32_e32 v17, vcc, v1, v9, vcc
+; GFX6-NEXT: v_subb_u32_e32 v18, vcc, v2, v10, vcc
+; GFX6-NEXT: v_subb_u32_e32 v19, vcc, v3, v11, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[16:17], v[0:1]
+; GFX6-NEXT: s_movk_i32 s6, 0x7f
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[18:19], v[2:3]
+; GFX6-NEXT: s_sub_i32 s7, s6, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[18:19], v[2:3]
+; GFX6-NEXT: s_sub_i32 s8, 64, s6
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[8:9]
+; GFX6-NEXT: s_cmp_lt_u32 s6, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[10:11]
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX6-NEXT: s_cmp_eq_u32 s6, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX6-NEXT: v_lshr_b64 v[0:1], v[16:17], s6
+; GFX6-NEXT: v_lshl_b64 v[2:3], v[18:19], s8
+; GFX6-NEXT: s_cselect_b32 s5, 1, 0
+; GFX6-NEXT: s_and_b32 s4, 1, s4
+; GFX6-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX6-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX6-NEXT: v_ashr_i64 v[0:1], v[18:19], s7
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX6-NEXT: s_and_b32 s4, 1, s5
+; GFX6-NEXT: v_ashr_i64 v[8:9], v[18:19], s6
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, s4
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX6-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, v16, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, v17, s[4:5]
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, 0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX6-NEXT: s_brev_b32 s4, 1
+; GFX6-NEXT: v_mov_b32_e32 v8, s4
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v4, v12
+; GFX6-NEXT: v_subb_u32_e32 v9, vcc, v5, v13, vcc
+; GFX6-NEXT: v_subb_u32_e32 v10, vcc, v6, v14, vcc
+; GFX6-NEXT: v_subb_u32_e32 v11, vcc, v7, v15, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
+; GFX6-NEXT: s_cmp_lt_u32 s6, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, v[10:11], v[6:7]
+; GFX6-NEXT: s_cselect_b32 s5, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[10:11], v[6:7]
+; GFX6-NEXT: s_cmp_eq_u32 s6, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[12:13]
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[14:15]
+; GFX6-NEXT: v_ashr_i64 v[12:13], v[10:11], s6
+; GFX6-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX6-NEXT: v_ashrrev_i32_e32 v15, 31, v11
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX6-NEXT: v_xor_b32_e32 v14, v5, v4
+; GFX6-NEXT: v_lshr_b64 v[4:5], v[8:9], s6
+; GFX6-NEXT: v_lshl_b64 v[6:7], v[10:11], s8
+; GFX6-NEXT: s_and_b32 s6, 1, s5
+; GFX6-NEXT: v_or_b32_e32 v6, v4, v6
+; GFX6-NEXT: v_or_b32_e32 v7, v5, v7
+; GFX6-NEXT: v_ashr_i64 v[4:5], v[10:11], s7
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX6-NEXT: s_and_b32 s6, 1, s9
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX6-NEXT: s_and_b32 s5, 1, s5
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX6-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
+; GFX6-NEXT: v_cndmask_b32_e32 v6, v15, v12, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v7, v15, v13, vcc
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, 0, v4
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GFX6-NEXT: v_mov_b32_e32 v12, s4
+; GFX6-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; GFX6-NEXT: v_addc_u32_e32 v7, vcc, v7, v12, vcc
+; GFX6-NEXT: v_and_b32_e32 v12, 1, v14
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v6, v10, v6, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v7, v11, v7, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_ssubsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v16, vcc, v0, v8
+; GFX8-NEXT: v_subb_u32_e32 v17, vcc, v1, v9, vcc
+; GFX8-NEXT: v_subb_u32_e32 v18, vcc, v2, v10, vcc
+; GFX8-NEXT: v_subb_u32_e32 v19, vcc, v3, v11, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[16:17], v[0:1]
+; GFX8-NEXT: s_movk_i32 s6, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[18:19], v[2:3]
+; GFX8-NEXT: s_sub_i32 s7, s6, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[18:19], v[2:3]
+; GFX8-NEXT: s_sub_i32 s8, 64, s6
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[8:9]
+; GFX8-NEXT: s_cmp_lt_u32 s6, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[10:11]
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX8-NEXT: s_cmp_eq_u32 s6, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX8-NEXT: v_lshrrev_b64 v[0:1], s6, v[16:17]
+; GFX8-NEXT: v_lshlrev_b64 v[2:3], s8, v[18:19]
+; GFX8-NEXT: s_cselect_b32 s5, 1, 0
+; GFX8-NEXT: s_and_b32 s4, 1, s4
+; GFX8-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX8-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX8-NEXT: v_ashrrev_i64 v[0:1], s7, v[18:19]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: s_and_b32 s4, 1, s5
+; GFX8-NEXT: v_ashrrev_i64 v[8:9], s6, v[18:19]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX8-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, v16, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, v17, s[4:5]
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, 0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
+; GFX8-NEXT: s_brev_b32 s4, 1
+; GFX8-NEXT: v_mov_b32_e32 v8, s4
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, 0, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, v4, v12
+; GFX8-NEXT: v_subb_u32_e32 v9, vcc, v5, v13, vcc
+; GFX8-NEXT: v_subb_u32_e32 v10, vcc, v6, v14, vcc
+; GFX8-NEXT: v_subb_u32_e32 v11, vcc, v7, v15, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
+; GFX8-NEXT: s_cmp_lt_u32 s6, 64
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, v[10:11], v[6:7]
+; GFX8-NEXT: s_cselect_b32 s5, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[10:11], v[6:7]
+; GFX8-NEXT: s_cmp_eq_u32 s6, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[12:13]
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[14:15]
+; GFX8-NEXT: v_ashrrev_i64 v[12:13], s6, v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX8-NEXT: v_ashrrev_i32_e32 v15, 31, v11
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX8-NEXT: v_xor_b32_e32 v14, v5, v4
+; GFX8-NEXT: v_lshrrev_b64 v[4:5], s6, v[8:9]
+; GFX8-NEXT: v_lshlrev_b64 v[6:7], s8, v[10:11]
+; GFX8-NEXT: s_and_b32 s6, 1, s5
+; GFX8-NEXT: v_or_b32_e32 v6, v4, v6
+; GFX8-NEXT: v_or_b32_e32 v7, v5, v7
+; GFX8-NEXT: v_ashrrev_i64 v[4:5], s7, v[10:11]
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX8-NEXT: s_and_b32 s6, 1, s9
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX8-NEXT: s_and_b32 s5, 1, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v15, v12, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v15, v13, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, 0, v4
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
+; GFX8-NEXT: v_mov_b32_e32 v12, s4
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, 0, v6, vcc
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v12, vcc
+; GFX8-NEXT: v_and_b32_e32 v12, 1, v14
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v10, v6, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v11, v7, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_ssubsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v16, vcc, v0, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v1, v9, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v18, vcc, v2, v10, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v19, vcc, v3, v11, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[16:17], v[0:1]
+; GFX9-NEXT: s_movk_i32 s6, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[18:19], v[2:3]
+; GFX9-NEXT: s_sub_i32 s7, s6, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[18:19], v[2:3]
+; GFX9-NEXT: s_sub_i32 s8, 64, s6
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[8:9]
+; GFX9-NEXT: s_cmp_lt_u32 s6, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[10:11]
+; GFX9-NEXT: s_cmp_eq_u32 s6, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v10, v1, v0
+; GFX9-NEXT: v_lshrrev_b64 v[0:1], s6, v[16:17]
+; GFX9-NEXT: v_lshlrev_b64 v[2:3], s8, v[18:19]
+; GFX9-NEXT: s_cselect_b32 s5, 1, 0
+; GFX9-NEXT: s_and_b32 s4, 1, s4
+; GFX9-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX9-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX9-NEXT: v_ashrrev_i64 v[0:1], s7, v[18:19]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: s_and_b32 s4, 1, s5
+; GFX9-NEXT: v_ashrrev_i64 v[8:9], s6, v[18:19]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 s[4:5], 0, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v1, v3, vcc
+; GFX9-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, v16, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, v17, s[4:5]
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v11, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v11, v9, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
+; GFX9-NEXT: s_brev_b32 s4, 1
+; GFX9-NEXT: v_mov_b32_e32 v8, s4
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, 0, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v10
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v16, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v17, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v18, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v19, v3, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v4, v12
+; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v5, v13, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v10, vcc, v6, v14, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v11, vcc, v7, v15, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[8:9], v[4:5]
+; GFX9-NEXT: s_cmp_lt_u32 s6, 64
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, v[10:11], v[6:7]
+; GFX9-NEXT: s_cselect_b32 s5, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[10:11], v[6:7]
+; GFX9-NEXT: s_cmp_eq_u32 s6, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, 0, v[12:13]
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, 0, v[14:15]
+; GFX9-NEXT: v_ashrrev_i64 v[12:13], s6, v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v6, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, 0, v[14:15]
+; GFX9-NEXT: v_ashrrev_i32_e32 v15, 31, v11
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc
+; GFX9-NEXT: v_xor_b32_e32 v14, v5, v4
+; GFX9-NEXT: v_lshrrev_b64 v[4:5], s6, v[8:9]
+; GFX9-NEXT: v_lshlrev_b64 v[6:7], s8, v[10:11]
+; GFX9-NEXT: s_and_b32 s6, 1, s5
+; GFX9-NEXT: v_or_b32_e32 v6, v4, v6
+; GFX9-NEXT: v_or_b32_e32 v7, v5, v7
+; GFX9-NEXT: v_ashrrev_i64 v[4:5], s7, v[10:11]
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX9-NEXT: s_and_b32 s6, 1, s9
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v7, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s6
+; GFX9-NEXT: s_and_b32 s5, 1, s5
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v8, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v5, v9, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s5
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v15, v12, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v15, v13, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, 0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, 0, v5, vcc
+; GFX9-NEXT: v_mov_b32_e32 v12, s4
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, 0, v6, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v12, vcc
+; GFX9-NEXT: v_and_b32_e32 v12, 1, v14
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v8, v4, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v9, v5, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v10, v6, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v11, v7, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_ssubsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v22, v0
+; GFX10-NEXT: v_mov_b32_e32 v23, v1
+; GFX10-NEXT: v_mov_b32_e32 v20, v2
+; GFX10-NEXT: v_mov_b32_e32 v21, v3
+; GFX10-NEXT: s_movk_i32 s5, 0x7f
+; GFX10-NEXT: v_sub_co_u32_e64 v16, vcc_lo, v22, v8
+; GFX10-NEXT: s_sub_i32 s6, 64, s5
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v17, vcc_lo, v23, v9, vcc_lo
+; GFX10-NEXT: s_sub_i32 s7, s5, 64
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v18, vcc_lo, v20, v10, vcc_lo
+; GFX10-NEXT: s_cmp_lt_u32 s5, 64
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v19, vcc_lo, v21, v11, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[16:17], v[22:23]
+; GFX10-NEXT: v_mov_b32_e32 v26, v4
+; GFX10-NEXT: v_mov_b32_e32 v27, v5
+; GFX10-NEXT: v_mov_b32_e32 v24, v6
+; GFX10-NEXT: v_lshlrev_b64 v[2:3], s6, v[18:19]
+; GFX10-NEXT: v_mov_b32_e32 v25, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, v[18:19], v[20:21]
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[18:19], v[20:21]
+; GFX10-NEXT: v_cndmask_b32_e32 v20, v1, v0, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, 0, v[8:9]
+; GFX10-NEXT: v_lshrrev_b64 v[0:1], s5, v[16:17]
+; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_i64_e32 vcc_lo, 0, v[10:11]
+; GFX10-NEXT: v_or_b32_e32 v2, v0, v2
+; GFX10-NEXT: v_or_b32_e32 v3, v1, v3
+; GFX10-NEXT: v_ashrrev_i64 v[0:1], s5, v[18:19]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, 0, v[10:11]
+; GFX10-NEXT: v_ashrrev_i32_e32 v11, 31, v19
+; GFX10-NEXT: v_cndmask_b32_e32 v10, v9, v8, vcc_lo
+; GFX10-NEXT: s_cselect_b32 vcc_lo, 1, 0
+; GFX10-NEXT: v_ashrrev_i64 v[8:9], s7, v[18:19]
+; GFX10-NEXT: s_cmp_eq_u32 s5, 0
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: s_and_b32 s8, 1, vcc_lo
+; GFX10-NEXT: s_and_b32 s4, 1, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, s8
+; GFX10-NEXT: v_xor_b32_e32 v9, v10, v20
+; GFX10-NEXT: s_brev_b32 s8, 1
+; GFX10-NEXT: s_cmp_lt_u32 s5, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v2, v16, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v3, v17, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v11, v0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v11, v1, s4
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v9
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v2, 0
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, 0, v3, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v20, vcc_lo, 0, v0, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v21, vcc_lo, s8, v1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX10-NEXT: v_sub_co_u32_e64 v8, s4, v26, v12
+; GFX10-NEXT: v_sub_co_ci_u32_e64 v9, s4, v27, v13, s4
+; GFX10-NEXT: v_sub_co_ci_u32_e64 v10, s4, v24, v14, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v16, v2, vcc_lo
+; GFX10-NEXT: v_sub_co_ci_u32_e64 v11, s4, v25, v15, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[8:9], v[26:27]
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v17, v3, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e32 v2, v18, v20, vcc_lo
+; GFX10-NEXT: v_lshrrev_b64 v[3:4], s5, v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, v[10:11], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v16, 0, 1, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, 0, v[12:13]
+; GFX10-NEXT: v_lshlrev_b64 v[12:13], s6, v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e64 v17, 0, 1, s4
+; GFX10-NEXT: v_cmp_lt_i64_e64 s4, 0, v[14:15]
+; GFX10-NEXT: v_or_b32_e32 v12, v3, v12
+; GFX10-NEXT: v_or_b32_e32 v13, v4, v13
+; GFX10-NEXT: v_ashrrev_i64 v[3:4], s5, v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e64 v18, 0, 1, s4
+; GFX10-NEXT: v_cmp_eq_u64_e64 s4, v[10:11], v[24:25]
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v16, v5, s4
+; GFX10-NEXT: v_cmp_eq_u64_e64 s4, 0, v[14:15]
+; GFX10-NEXT: v_ashrrev_i64 v[5:6], s7, v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e64 v14, v18, v17, s4
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s5, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v12, s4
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v13, s4
+; GFX10-NEXT: s_and_b32 s5, 1, s6
+; GFX10-NEXT: s_and_b32 s6, 1, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, s5
+; GFX10-NEXT: v_xor_b32_e32 v7, v14, v7
+; GFX10-NEXT: v_ashrrev_i32_e32 v18, 31, v11
+; GFX10-NEXT: v_cmp_ne_u32_e64 s5, 0, s6
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, v8, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, v9, s4
+; GFX10-NEXT: v_and_b32_e32 v7, 1, v7
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v18, v3, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v18, v4, s5
+; GFX10-NEXT: v_add_co_u32_e64 v5, s4, v5, 0
+; GFX10-NEXT: v_add_co_ci_u32_e64 v6, s4, 0, v6, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 s5, 0, v7
+; GFX10-NEXT: v_add_co_ci_u32_e64 v7, s4, 0, v3, s4
+; GFX10-NEXT: v_cndmask_b32_e32 v3, v19, v21, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e64 v12, s4, s8, v4, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v8, v5, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v9, v6, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v10, v7, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v11, v12, s5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
+
+define amdgpu_ps <2 x i128> @s_ssubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128> inreg %rhs) {
+; GFX6-LABEL: s_ssubsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s16, s0, s8
+; GFX6-NEXT: s_cselect_b32 s17, 1, 0
+; GFX6-NEXT: s_and_b32 s17, s17, 1
+; GFX6-NEXT: s_cmp_lg_u32 s17, 0
+; GFX6-NEXT: s_subb_u32 s17, s1, s9
+; GFX6-NEXT: s_cselect_b32 s18, 1, 0
+; GFX6-NEXT: s_and_b32 s18, s18, 1
+; GFX6-NEXT: s_cmp_lg_u32 s18, 0
+; GFX6-NEXT: s_subb_u32 s18, s2, s10
+; GFX6-NEXT: s_cselect_b32 s19, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: s_and_b32 s19, s19, 1
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: s_cmp_lg_u32 s19, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_subb_u32 s19, s3, s11
+; GFX6-NEXT: s_movk_i32 s20, 0x7f
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[18:19], v[0:1]
+; GFX6-NEXT: v_cmp_gt_u64_e64 s[0:1], s[8:9], 0
+; GFX6-NEXT: s_sub_i32 s21, s20, 64
+; GFX6-NEXT: s_sub_i32 s22, 64, s20
+; GFX6-NEXT: s_cmp_lt_u32 s20, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[18:19], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX6-NEXT: s_cselect_b32 s23, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s20, 0
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[0:1], s[10:11], 0
+; GFX6-NEXT: s_cselect_b32 s24, 1, 0
+; GFX6-NEXT: s_lshr_b64 s[2:3], s[16:17], s20
+; GFX6-NEXT: s_lshl_b64 s[8:9], s[18:19], s22
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[10:11], 0
+; GFX6-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GFX6-NEXT: s_ashr_i32 s8, s19, 31
+; GFX6-NEXT: s_ashr_i64 s[0:1], s[18:19], s20
+; GFX6-NEXT: s_ashr_i64 s[10:11], s[18:19], s21
+; GFX6-NEXT: s_and_b32 s23, s23, 1
+; GFX6-NEXT: s_cmp_lg_u32 s23, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX6-NEXT: s_and_b32 s10, s24, 1
+; GFX6-NEXT: s_cmp_lg_u32 s10, 0
+; GFX6-NEXT: s_cselect_b64 s[2:3], s[16:17], s[2:3]
+; GFX6-NEXT: s_cmp_lg_u32 s23, 0
+; GFX6-NEXT: s_mov_b32 s9, s8
+; GFX6-NEXT: s_cselect_b64 s[0:1], s[0:1], s[8:9]
+; GFX6-NEXT: s_add_u32 s2, s2, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s3, s3, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s0, s0, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX6-NEXT: s_brev_b32 s23, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_addc_u32 s1, s1, s23
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v0, s0
+; GFX6-NEXT: v_mov_b32_e32 v1, s2
+; GFX6-NEXT: v_mov_b32_e32 v3, s16
+; GFX6-NEXT: s_sub_u32 s0, s4, s12
+; GFX6-NEXT: v_cndmask_b32_e32 v5, v3, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v1, s1
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: s_subb_u32 s1, s5, s13
+; GFX6-NEXT: s_cselect_b32 s2, 1, 0
+; GFX6-NEXT: s_and_b32 s2, s2, 1
+; GFX6-NEXT: s_cmp_lg_u32 s2, 0
+; GFX6-NEXT: v_mov_b32_e32 v2, s3
+; GFX6-NEXT: v_mov_b32_e32 v4, s17
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s18
+; GFX6-NEXT: v_mov_b32_e32 v3, s19
+; GFX6-NEXT: s_subb_u32 s2, s6, s14
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v6, v2, v0, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v7, v3, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s4
+; GFX6-NEXT: s_and_b32 s3, s3, 1
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: s_cmp_lg_u32 s3, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s6
+; GFX6-NEXT: s_subb_u32 s3, s7, s15
+; GFX6-NEXT: v_mov_b32_e32 v1, s7
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_cmp_gt_u64_e64 s[4:5], s[12:13], 0
+; GFX6-NEXT: s_cmp_lt_u32 s20, 64
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX6-NEXT: s_cselect_b32 s12, 1, 0
+; GFX6-NEXT: s_cmp_eq_u32 s20, 0
+; GFX6-NEXT: v_cmp_gt_i64_e64 s[4:5], s[14:15], 0
+; GFX6-NEXT: s_cselect_b32 s13, 1, 0
+; GFX6-NEXT: s_lshr_b64 s[6:7], s[0:1], s20
+; GFX6-NEXT: s_lshl_b64 s[8:9], s[2:3], s22
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; GFX6-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX6-NEXT: s_ashr_i32 s8, s3, 31
+; GFX6-NEXT: s_ashr_i64 s[4:5], s[2:3], s20
+; GFX6-NEXT: s_ashr_i64 s[10:11], s[2:3], s21
+; GFX6-NEXT: s_and_b32 s12, s12, 1
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[6:7], s[10:11]
+; GFX6-NEXT: s_and_b32 s10, s13, 1
+; GFX6-NEXT: s_cmp_lg_u32 s10, 0
+; GFX6-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX6-NEXT: s_mov_b32 s9, s8
+; GFX6-NEXT: s_cmp_lg_u32 s12, 0
+; GFX6-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
+; GFX6-NEXT: s_add_u32 s6, s6, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s7, s7, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_cmp_eq_u64_e64 vcc, s[14:15], 0
+; GFX6-NEXT: s_addc_u32 s4, s4, 0
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX6-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_mov_b32_e32 v3, s0
+; GFX6-NEXT: v_mov_b32_e32 v8, s1
+; GFX6-NEXT: s_addc_u32 s5, s5, s23
+; GFX6-NEXT: v_mov_b32_e32 v1, s6
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v2, s7
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v1, v8, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s4
+; GFX6-NEXT: v_mov_b32_e32 v8, s2
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: v_mov_b32_e32 v9, s3
+; GFX6-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX6-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v5
+; GFX6-NEXT: v_readfirstlane_b32 s1, v4
+; GFX6-NEXT: v_readfirstlane_b32 s2, v6
+; GFX6-NEXT: v_readfirstlane_b32 s3, v7
+; GFX6-NEXT: v_readfirstlane_b32 s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s5, v1
+; GFX6-NEXT: v_readfirstlane_b32 s6, v2
+; GFX6-NEXT: v_readfirstlane_b32 s7, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_ssubsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s16, s0, s8
+; GFX8-NEXT: s_cselect_b32 s17, 1, 0
+; GFX8-NEXT: s_and_b32 s17, s17, 1
+; GFX8-NEXT: s_cmp_lg_u32 s17, 0
+; GFX8-NEXT: s_subb_u32 s17, s1, s9
+; GFX8-NEXT: s_cselect_b32 s18, 1, 0
+; GFX8-NEXT: s_and_b32 s18, s18, 1
+; GFX8-NEXT: s_cmp_lg_u32 s18, 0
+; GFX8-NEXT: s_subb_u32 s18, s2, s10
+; GFX8-NEXT: s_cselect_b32 s19, 1, 0
+; GFX8-NEXT: s_and_b32 s19, s19, 1
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_cmp_lg_u32 s19, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_subb_u32 s19, s3, s11
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: s_cmp_eq_u64 s[18:19], s[2:3]
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[18:19], v[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cmp_gt_u64_e64 s[0:1], s[8:9], 0
+; GFX8-NEXT: s_cmp_eq_u64 s[10:11], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[0:1], s[10:11], 0
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_movk_i32 s20, 0x7f
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s2
+; GFX8-NEXT: s_sub_i32 s21, s20, 64
+; GFX8-NEXT: s_sub_i32 s22, 64, s20
+; GFX8-NEXT: s_cmp_lt_u32 s20, 64
+; GFX8-NEXT: s_cselect_b32 s23, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s20, 0
+; GFX8-NEXT: s_cselect_b32 s24, 1, 0
+; GFX8-NEXT: s_lshr_b64 s[2:3], s[16:17], s20
+; GFX8-NEXT: s_lshl_b64 s[8:9], s[18:19], s22
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GFX8-NEXT: s_ashr_i32 s8, s19, 31
+; GFX8-NEXT: s_ashr_i64 s[0:1], s[18:19], s20
+; GFX8-NEXT: s_ashr_i64 s[10:11], s[18:19], s21
+; GFX8-NEXT: s_and_b32 s23, s23, 1
+; GFX8-NEXT: s_cmp_lg_u32 s23, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX8-NEXT: s_and_b32 s10, s24, 1
+; GFX8-NEXT: s_cmp_lg_u32 s10, 0
+; GFX8-NEXT: s_cselect_b64 s[2:3], s[16:17], s[2:3]
+; GFX8-NEXT: s_cmp_lg_u32 s23, 0
+; GFX8-NEXT: s_mov_b32 s9, s8
+; GFX8-NEXT: s_cselect_b64 s[0:1], s[0:1], s[8:9]
+; GFX8-NEXT: s_add_u32 s2, s2, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s3, s3, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s0, s0, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_brev_b32 s23, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: s_addc_u32 s1, s1, s23
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v0, s0
+; GFX8-NEXT: v_mov_b32_e32 v1, s2
+; GFX8-NEXT: v_mov_b32_e32 v3, s16
+; GFX8-NEXT: s_sub_u32 s0, s4, s12
+; GFX8-NEXT: v_cndmask_b32_e32 v5, v3, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v1, s1
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: s_subb_u32 s1, s5, s13
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_and_b32 s2, s2, 1
+; GFX8-NEXT: s_cmp_lg_u32 s2, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s3
+; GFX8-NEXT: v_mov_b32_e32 v4, s17
+; GFX8-NEXT: s_subb_u32 s2, s6, s14
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s18
+; GFX8-NEXT: v_mov_b32_e32 v3, s19
+; GFX8-NEXT: s_and_b32 s3, s3, 1
+; GFX8-NEXT: v_cndmask_b32_e32 v6, v2, v0, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v7, v3, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: s_cmp_lg_u32 s3, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: s_subb_u32 s3, s7, s15
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: s_and_b32 s4, 1, s6
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: v_cmp_gt_u64_e64 s[4:5], s[12:13], 0
+; GFX8-NEXT: s_cmp_eq_u64 s[14:15], 0
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX8-NEXT: v_cmp_gt_i64_e64 s[4:5], s[14:15], 0
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; GFX8-NEXT: s_and_b32 s4, 1, s6
+; GFX8-NEXT: s_cmp_lt_u32 s20, 64
+; GFX8-NEXT: s_cselect_b32 s12, 1, 0
+; GFX8-NEXT: s_cmp_eq_u32 s20, 0
+; GFX8-NEXT: s_cselect_b32 s13, 1, 0
+; GFX8-NEXT: s_lshr_b64 s[6:7], s[0:1], s20
+; GFX8-NEXT: s_lshl_b64 s[8:9], s[2:3], s22
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX8-NEXT: s_ashr_i32 s8, s3, 31
+; GFX8-NEXT: s_ashr_i64 s[4:5], s[2:3], s20
+; GFX8-NEXT: s_ashr_i64 s[10:11], s[2:3], s21
+; GFX8-NEXT: s_and_b32 s12, s12, 1
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[6:7], s[10:11]
+; GFX8-NEXT: s_and_b32 s10, s13, 1
+; GFX8-NEXT: s_cmp_lg_u32 s10, 0
+; GFX8-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX8-NEXT: s_mov_b32 s9, s8
+; GFX8-NEXT: s_cmp_lg_u32 s12, 0
+; GFX8-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
+; GFX8-NEXT: s_add_u32 s6, s6, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s7, s7, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s4, s4, 0
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX8-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_mov_b32_e32 v3, s0
+; GFX8-NEXT: v_mov_b32_e32 v8, s1
+; GFX8-NEXT: s_addc_u32 s5, s5, s23
+; GFX8-NEXT: v_mov_b32_e32 v1, s6
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s7
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v1, v8, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_mov_b32_e32 v8, s2
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: v_mov_b32_e32 v9, s3
+; GFX8-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX8-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v5
+; GFX8-NEXT: v_readfirstlane_b32 s1, v4
+; GFX8-NEXT: v_readfirstlane_b32 s2, v6
+; GFX8-NEXT: v_readfirstlane_b32 s3, v7
+; GFX8-NEXT: v_readfirstlane_b32 s4, v0
+; GFX8-NEXT: v_readfirstlane_b32 s5, v1
+; GFX8-NEXT: v_readfirstlane_b32 s6, v2
+; GFX8-NEXT: v_readfirstlane_b32 s7, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_ssubsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s16, s0, s8
+; GFX9-NEXT: s_cselect_b32 s17, 1, 0
+; GFX9-NEXT: s_and_b32 s17, s17, 1
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_subb_u32 s17, s1, s9
+; GFX9-NEXT: s_cselect_b32 s18, 1, 0
+; GFX9-NEXT: s_and_b32 s18, s18, 1
+; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_subb_u32 s18, s2, s10
+; GFX9-NEXT: s_cselect_b32 s19, 1, 0
+; GFX9-NEXT: s_and_b32 s19, s19, 1
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: s_subb_u32 s19, s3, s11
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[16:17], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: s_cmp_eq_u64 s[18:19], s[2:3]
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[18:19], v[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[0:1], s[8:9], 0
+; GFX9-NEXT: s_cmp_eq_u64 s[10:11], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[0:1]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[0:1], s[10:11], 0
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_movk_i32 s20, 0x7f
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s2
+; GFX9-NEXT: s_sub_i32 s21, s20, 64
+; GFX9-NEXT: s_sub_i32 s22, 64, s20
+; GFX9-NEXT: s_cmp_lt_u32 s20, 64
+; GFX9-NEXT: s_cselect_b32 s23, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s20, 0
+; GFX9-NEXT: s_cselect_b32 s24, 1, 0
+; GFX9-NEXT: s_lshr_b64 s[2:3], s[16:17], s20
+; GFX9-NEXT: s_lshl_b64 s[8:9], s[18:19], s22
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_or_b64 s[2:3], s[2:3], s[8:9]
+; GFX9-NEXT: s_ashr_i32 s8, s19, 31
+; GFX9-NEXT: s_ashr_i64 s[0:1], s[18:19], s20
+; GFX9-NEXT: s_ashr_i64 s[10:11], s[18:19], s21
+; GFX9-NEXT: s_and_b32 s23, s23, 1
+; GFX9-NEXT: s_cmp_lg_u32 s23, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX9-NEXT: s_and_b32 s10, s24, 1
+; GFX9-NEXT: s_cmp_lg_u32 s10, 0
+; GFX9-NEXT: s_cselect_b64 s[2:3], s[16:17], s[2:3]
+; GFX9-NEXT: s_cmp_lg_u32 s23, 0
+; GFX9-NEXT: s_mov_b32 s9, s8
+; GFX9-NEXT: s_cselect_b64 s[0:1], s[0:1], s[8:9]
+; GFX9-NEXT: s_add_u32 s2, s2, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s3, s3, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s0, s0, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX9-NEXT: s_brev_b32 s23, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: s_addc_u32 s1, s1, s23
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s2
+; GFX9-NEXT: v_mov_b32_e32 v3, s16
+; GFX9-NEXT: s_sub_u32 s0, s4, s12
+; GFX9-NEXT: v_cndmask_b32_e32 v5, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_subb_u32 s1, s5, s13
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_and_b32 s2, s2, 1
+; GFX9-NEXT: s_cmp_lg_u32 s2, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s3
+; GFX9-NEXT: v_mov_b32_e32 v4, s17
+; GFX9-NEXT: s_subb_u32 s2, s6, s14
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v4, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s18
+; GFX9-NEXT: v_mov_b32_e32 v3, s19
+; GFX9-NEXT: s_and_b32 s3, s3, 1
+; GFX9-NEXT: v_cndmask_b32_e32 v6, v2, v0, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v7, v3, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: s_cmp_lg_u32 s3, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: s_subb_u32 s3, s7, s15
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_i64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: s_and_b32 s4, 1, s6
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: v_cmp_gt_u64_e64 s[4:5], s[12:13], 0
+; GFX9-NEXT: s_cmp_eq_u64 s[14:15], 0
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, s[4:5]
+; GFX9-NEXT: v_cmp_gt_i64_e64 s[4:5], s[14:15], 0
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, s[4:5]
+; GFX9-NEXT: s_and_b32 s4, 1, s6
+; GFX9-NEXT: s_cmp_lt_u32 s20, 64
+; GFX9-NEXT: s_cselect_b32 s12, 1, 0
+; GFX9-NEXT: s_cmp_eq_u32 s20, 0
+; GFX9-NEXT: s_cselect_b32 s13, 1, 0
+; GFX9-NEXT: s_lshr_b64 s[6:7], s[0:1], s20
+; GFX9-NEXT: s_lshl_b64 s[8:9], s[2:3], s22
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: s_or_b64 s[6:7], s[6:7], s[8:9]
+; GFX9-NEXT: s_ashr_i32 s8, s3, 31
+; GFX9-NEXT: s_ashr_i64 s[4:5], s[2:3], s20
+; GFX9-NEXT: s_ashr_i64 s[10:11], s[2:3], s21
+; GFX9-NEXT: s_and_b32 s12, s12, 1
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[6:7], s[10:11]
+; GFX9-NEXT: s_and_b32 s10, s13, 1
+; GFX9-NEXT: s_cmp_lg_u32 s10, 0
+; GFX9-NEXT: s_cselect_b64 s[6:7], s[0:1], s[6:7]
+; GFX9-NEXT: s_mov_b32 s9, s8
+; GFX9-NEXT: s_cmp_lg_u32 s12, 0
+; GFX9-NEXT: s_cselect_b64 s[4:5], s[4:5], s[8:9]
+; GFX9-NEXT: s_add_u32 s6, s6, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s4, s4, 0
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc
+; GFX9-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_mov_b32_e32 v3, s0
+; GFX9-NEXT: v_mov_b32_e32 v8, s1
+; GFX9-NEXT: s_addc_u32 s5, s5, s23
+; GFX9-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, s7
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v3, v1, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v1, v8, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v8, s2
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: v_mov_b32_e32 v9, s3
+; GFX9-NEXT: v_cndmask_b32_e32 v2, v8, v2, vcc
+; GFX9-NEXT: v_cndmask_b32_e32 v3, v9, v3, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v5
+; GFX9-NEXT: v_readfirstlane_b32 s1, v4
+; GFX9-NEXT: v_readfirstlane_b32 s2, v6
+; GFX9-NEXT: v_readfirstlane_b32 s3, v7
+; GFX9-NEXT: v_readfirstlane_b32 s4, v0
+; GFX9-NEXT: v_readfirstlane_b32 s5, v1
+; GFX9-NEXT: v_readfirstlane_b32 s6, v2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_ssubsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s28, s0, s8
+; GFX10-NEXT: s_cselect_b32 s17, 1, 0
+; GFX10-NEXT: s_mov_b32 s46, s0
+; GFX10-NEXT: s_and_b32 s17, s17, 1
+; GFX10-NEXT: s_mov_b32 s47, s1
+; GFX10-NEXT: s_cmp_lg_u32 s17, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_subb_u32 s29, s1, s9
+; GFX10-NEXT: s_cselect_b32 s18, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[28:29], s[46:47]
+; GFX10-NEXT: s_and_b32 s18, s18, 1
+; GFX10-NEXT: s_cmp_lg_u32 s18, 0
+; GFX10-NEXT: s_subb_u32 s30, s2, s10
+; GFX10-NEXT: s_cselect_b32 s19, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s19, s19, 1
+; GFX10-NEXT: s_cmp_lg_u32 s19, 0
+; GFX10-NEXT: s_subb_u32 s31, s3, s11
+; GFX10-NEXT: v_cmp_lt_i64_e64 s0, s[30:31], s[2:3]
+; GFX10-NEXT: s_cmp_eq_u64 s[30:31], s[2:3]
+; GFX10-NEXT: v_cmp_gt_u64_e64 s2, s[8:9], 0
+; GFX10-NEXT: s_cselect_b32 s20, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s0, 1, s20
+; GFX10-NEXT: s_cmp_eq_u64 s[10:11], 0
+; GFX10-NEXT: s_movk_i32 s20, 0x7f
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s2
+; GFX10-NEXT: v_cmp_gt_i64_e64 s2, s[10:11], 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: s_and_b32 s1, 1, s1
+; GFX10-NEXT: s_sub_i32 s21, s20, 64
+; GFX10-NEXT: s_sub_i32 s22, 64, s20
+; GFX10-NEXT: s_cmp_lt_u32 s20, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: s_cselect_b32 s10, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s20, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, 0, 1, s2
+; GFX10-NEXT: s_cselect_b32 s23, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s1
+; GFX10-NEXT: s_lshr_b64 s[0:1], s[28:29], s20
+; GFX10-NEXT: s_lshl_b64 s[2:3], s[30:31], s22
+; GFX10-NEXT: s_and_b32 s24, s10, 1
+; GFX10-NEXT: s_or_b64 s[0:1], s[0:1], s[2:3]
+; GFX10-NEXT: s_ashr_i32 s2, s31, 31
+; GFX10-NEXT: s_ashr_i64 s[8:9], s[30:31], s20
+; GFX10-NEXT: s_ashr_i64 s[10:11], s[30:31], s21
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
+; GFX10-NEXT: s_mov_b32 s3, s2
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[0:1], s[10:11]
+; GFX10-NEXT: s_and_b32 s10, s23, 1
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v3, v2, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s10, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, s29
+; GFX10-NEXT: s_cselect_b64 s[0:1], s[28:29], s[0:1]
+; GFX10-NEXT: s_cmp_lg_u32 s24, 0
+; GFX10-NEXT: v_xor_b32_e32 v0, v1, v0
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[8:9], s[2:3]
+; GFX10-NEXT: s_add_u32 s0, s0, 0
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v1, s28
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_brev_b32 s23, 1
+; GFX10-NEXT: s_addc_u32 s1, s1, 0
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: v_mov_b32_e32 v3, s31
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s2, s2, 0
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v1, s0, vcc_lo
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v2, s1, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: v_mov_b32_e32 v2, s30
+; GFX10-NEXT: s_addc_u32 s3, s3, s23
+; GFX10-NEXT: s_sub_u32 s0, s4, s12
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, s3, vcc_lo
+; GFX10-NEXT: s_and_b32 s1, s1, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, s2, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_subb_u32 s1, s5, s13
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s3, s[0:1], s[4:5]
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_subb_u32 s8, s6, s14
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, s3
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: v_mov_b32_e32 v7, s8
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_subb_u32 s9, s7, s15
+; GFX10-NEXT: s_cmp_eq_u64 s[8:9], s[6:7]
+; GFX10-NEXT: v_cmp_lt_i64_e64 s3, s[8:9], s[6:7]
+; GFX10-NEXT: s_cselect_b32 s2, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v8, s9
+; GFX10-NEXT: s_and_b32 s2, 1, s2
+; GFX10-NEXT: s_cmp_eq_u64 s[14:15], 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s2
+; GFX10-NEXT: v_cmp_gt_u64_e64 s2, s[12:13], 0
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s3
+; GFX10-NEXT: s_cselect_b32 s3, 1, 0
+; GFX10-NEXT: s_and_b32 s16, 1, s3
+; GFX10-NEXT: s_cmp_lt_u32 s20, 64
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, s2
+; GFX10-NEXT: v_cmp_gt_i64_e64 s2, s[14:15], 0
+; GFX10-NEXT: s_cselect_b32 s10, 1, 0
+; GFX10-NEXT: s_cmp_eq_u32 s20, 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s16
+; GFX10-NEXT: s_cselect_b32 s12, 1, 0
+; GFX10-NEXT: s_lshl_b64 s[4:5], s[8:9], s22
+; GFX10-NEXT: v_cndmask_b32_e64 v6, 0, 1, s2
+; GFX10-NEXT: s_lshr_b64 s[2:3], s[0:1], s20
+; GFX10-NEXT: s_and_b32 s13, s10, 1
+; GFX10-NEXT: s_or_b64 s[2:3], s[2:3], s[4:5]
+; GFX10-NEXT: s_ashr_i32 s4, s9, 31
+; GFX10-NEXT: s_ashr_i64 s[6:7], s[8:9], s20
+; GFX10-NEXT: s_ashr_i64 s[10:11], s[8:9], s21
+; GFX10-NEXT: s_cmp_lg_u32 s13, 0
+; GFX10-NEXT: s_mov_b32 s5, s4
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[2:3], s[10:11]
+; GFX10-NEXT: s_and_b32 s10, s12, 1
+; GFX10-NEXT: v_cndmask_b32_e32 v5, v6, v5, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s10, 0
+; GFX10-NEXT: v_mov_b32_e32 v6, s1
+; GFX10-NEXT: s_cselect_b64 s[2:3], s[0:1], s[2:3]
+; GFX10-NEXT: s_cmp_lg_u32 s13, 0
+; GFX10-NEXT: v_xor_b32_e32 v4, v5, v4
+; GFX10-NEXT: s_cselect_b64 s[4:5], s[6:7], s[4:5]
+; GFX10-NEXT: s_add_u32 s2, s2, 0
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_mov_b32_e32 v5, s0
+; GFX10-NEXT: s_and_b32 s6, s6, 1
+; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: s_addc_u32 s3, s3, 0
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX10-NEXT: s_and_b32 s6, s6, 1
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: s_addc_u32 s4, s4, 0
+; GFX10-NEXT: s_cselect_b32 s6, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v5, s2, vcc_lo
+; GFX10-NEXT: s_and_b32 s6, s6, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v6, s3, vcc_lo
+; GFX10-NEXT: s_cmp_lg_u32 s6, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v7, s4, vcc_lo
+; GFX10-NEXT: s_addc_u32 s1, s5, s23
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v8, s1, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: v_readfirstlane_b32 s4, v4
+; GFX10-NEXT: v_readfirstlane_b32 s5, v5
+; GFX10-NEXT: v_readfirstlane_b32 s6, v6
+; GFX10-NEXT: v_readfirstlane_b32 s7, v7
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
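
The v2i128 checks above exercise the wide-integer path: instead of min/max, the
legalizer expands the saturating operation around a subtract-with-overflow. In
the generated code the overflow bit is recovered as (result < lhs) xor
(rhs > 0), composed from 64-bit compares, and the saturation limit is formed as
sign_splat(wrapped result) + 2^127, which yields INT128_MAX on positive
overflow and INT128_MIN on negative. A rough C++ model of the semantics being
verified (not LLVM's implementation; it assumes a host compiler with __int128
and __builtin_sub_overflow):

  static __int128 ssubsat128(__int128 a, __int128 b) {
    __int128 r;
    if (!__builtin_sub_overflow(a, b, &r))
      return r;
    // a - b can only overflow positive when a >= 0 and negative when a < 0,
    // so saturate toward the sign of the mathematically exact result.
    const __int128 Max = (__int128)(~(unsigned __int128)0 >> 1); // 2^127 - 1
    return a < 0 ? ~Max : Max;                                   // ~Max == Min
  }
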
+
+declare i7 @llvm.ssub.sat.i7(i7, i7) #0
+declare i8 @llvm.ssub.sat.i8(i8, i8) #0
+declare <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8>, <2 x i8>) #0
+declare <4 x i8> @llvm.ssub.sat.v4i8(<4 x i8>, <4 x i8>) #0
+
+declare i16 @llvm.ssub.sat.i16(i16, i16) #0
+declare <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16>, <2 x i16>) #0
+declare <3 x i16> @llvm.ssub.sat.v3i16(<3 x i16>, <3 x i16>) #0
+declare <4 x i16> @llvm.ssub.sat.v4i16(<4 x i16>, <4 x i16>) #0
+declare <5 x i16> @llvm.ssub.sat.v5i16(<5 x i16>, <5 x i16>) #0
+declare <6 x i16> @llvm.ssub.sat.v6i16(<6 x i16>, <6 x i16>) #0
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) #0
+
+declare i24 @llvm.ssub.sat.i24(i24, i24) #0
+
+declare i32 @llvm.ssub.sat.i32(i32, i32) #0
+declare <2 x i32> @llvm.ssub.sat.v2i32(<2 x i32>, <2 x i32>) #0
+declare <3 x i32> @llvm.ssub.sat.v3i32(<3 x i32>, <3 x i32>) #0
+declare <4 x i32> @llvm.ssub.sat.v4i32(<4 x i32>, <4 x i32>) #0
+declare <5 x i32> @llvm.ssub.sat.v5i32(<5 x i32>, <5 x i32>) #0
+declare <16 x i32> @llvm.ssub.sat.v16i32(<16 x i32>, <16 x i32>) #0
+
+declare i48 @llvm.ssub.sat.i48(i48, i48) #0
+
+declare i64 @llvm.ssub.sat.i64(i64, i64) #0
+declare <2 x i64> @llvm.ssub.sat.v2i64(<2 x i64>, <2 x i64>) #0
+
+declare i128 @llvm.ssub.sat.i128(i128, i128) #0
+declare <2 x i128> @llvm.ssub.sat.v2i128(<2 x i128>, <2 x i128>) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
new file mode 100644
index 000000000000..413bc7103847
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/uaddsat.ll
@@ -0,0 +1,4979 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -o - %s | FileCheck -check-prefix=GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -o - %s | FileCheck -check-prefix=GFX10 %s
+
+define i7 @v_uaddsat_i7(i7 %lhs, i7 %rhs) {
+; GFX6-LABEL: v_uaddsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 25, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 25, v1
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 25, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX8-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX8-NEXT: v_min_u16_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_lshrrev_b16_e32 v0, 9, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX9-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX9-NEXT: v_min_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_lshrrev_b16_e32 v0, 9, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 9, v0
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 9, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX10-NEXT: v_min_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b16_e64 v0, 9, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i7 @llvm.uadd.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
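
For unsigned saturating add the min/max lowering is visible directly: the RHS
is first clamped to the available headroom ~lhs, so the sum a + umin(~a, b)
can never wrap. Types narrower than a legal register are shifted into the most
significant bits first (by legal-width minus bit-width: 25 for i7 in a 32-bit
register on GFX6, 9 in a 16-bit register on GFX8+), so the clamp and the
would-be wraparound both happen at the value's own width, and the result is
shifted back down afterwards. A minimal C++ model of the GFX6 sequence
(illustrative only, not LLVM code):

  #include <algorithm>
  #include <cstdint>

  uint32_t uaddsat_i7(uint32_t a, uint32_t b) {
    uint32_t ah = a << 25, bh = b << 25;   // widen i7 into bits 31..25
    uint32_t s = ah + std::min(~ah, bh);   // a + umin(~a, b): cannot wrap
    return s >> 25;                        // narrow the i7 result back down
  }
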
+
+define amdgpu_ps i7 @s_uaddsat_i7(i7 inreg %lhs, i7 inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 25
+; GFX6-NEXT: s_lshl_b32 s1, s1, 25
+; GFX6-NEXT: s_not_b32 s2, s0
+; GFX6-NEXT: s_cmp_lt_u32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 25
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_xor_b32 s3, s0, 0xffff
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshr_b32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_xor_b32 s3, s0, 0xffff
+; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_lshr_b32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_xor_b32 s3, s0, 0xffff
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX10-NEXT: s_cmp_lt_u32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i7 @llvm.uadd.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
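
On the scalar (SALU) side the same clamp appears, but the umin is spelled out
as s_cmp_lt_u32 plus s_cselect_b32 and the ~a as s_not_b32; as the commit
message notes, dedicated SALU expansions have not been attempted yet. The
redundant s_bfe_u32 of the constant shift amount on GFX8+ appears to be an
unfolded zero-extension of the 16-bit shift amount left behind by the generic
legalization.
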
+
+define i8 @v_uaddsat_i8(i8 %lhs, i8 %rhs) {
+; GFX6-LABEL: v_uaddsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX8-NEXT: v_min_u16_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_lshrrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX9-NEXT: v_min_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_lshrrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX10-NEXT: v_min_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i8 @llvm.uadd.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define amdgpu_ps i8 @s_uaddsat_i8(i8 inreg %lhs, i8 inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_not_b32 s2, s0
+; GFX6-NEXT: s_cmp_lt_u32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 24
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_xor_b32 s3, s0, 0xffff
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshr_b32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_xor_b32 s3, s0, 0xffff
+; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_lshr_b32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_xor_b32 s3, s0, 0xffff
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX10-NEXT: s_cmp_lt_u32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i8 @llvm.uadd.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define i16 @v_uaddsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
+; GFX6-LABEL: v_uaddsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_xor_b32_e32 v4, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, v4, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_xor_b32_e32 v3, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v2, v3, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_xor_b32_e32 v4, s4, v0
+; GFX8-NEXT: v_min_u16_e32 v1, v4, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_xor_b32_e32 v1, s4, v3
+; GFX8-NEXT: v_min_u16_e32 v1, v1, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX8-NEXT: v_add_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_xor_b32_e32 v4, s4, v0
+; GFX9-NEXT: v_min_u16_e32 v1, v4, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_xor_b32_e32 v1, s4, v2
+; GFX9-NEXT: v_min_u16_e32 v1, v1, v3
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_add_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: v_lshlrev_b16_e64 v2, 8, v0
+; GFX10-NEXT: v_lshrrev_b32_sdwa v0, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: s_mov_b32 s5, 0xffff
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: v_xor_b32_e32 v4, s5, v2
+; GFX10-NEXT: v_xor_b32_e32 v5, s5, v0
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u16_e64 v1, v4, v1
+; GFX10-NEXT: v_min_u16_e64 v3, v5, v3
+; GFX10-NEXT: v_add_nc_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v3
+; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
+
+define amdgpu_ps i16 @s_uaddsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
+; GFX6-LABEL: s_uaddsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshr_b32 s3, s1, 8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_not_b32 s4, s0
+; GFX6-NEXT: s_cmp_lt_u32 s4, s1
+; GFX6-NEXT: s_cselect_b32 s1, s4, s1
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_lshr_b32 s0, s0, 24
+; GFX6-NEXT: s_not_b32 s3, s1
+; GFX6-NEXT: s_cmp_lt_u32 s3, s2
+; GFX6-NEXT: s_cselect_b32 s2, s3, s2
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: s_movk_i32 s2, 0xff
+; GFX6-NEXT: s_lshr_b32 s1, s1, 24
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshl_b32 s0, s0, s4
+; GFX8-NEXT: s_mov_b32 s5, 0xffff
+; GFX8-NEXT: s_xor_b32 s6, s0, s5
+; GFX8-NEXT: s_lshr_b32 s3, s1, 8
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s6, s1
+; GFX8-NEXT: s_cselect_b32 s1, s6, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_lshl_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s2, s3, s4
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_xor_b32 s3, s1, s5
+; GFX8-NEXT: s_lshr_b32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s2
+; GFX8-NEXT: s_cselect_b32 s2, s3, s2
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_movk_i32 s2, 0xff
+; GFX8-NEXT: s_lshr_b32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshl_b32 s0, s0, s4
+; GFX9-NEXT: s_mov_b32 s5, 0xffff
+; GFX9-NEXT: s_xor_b32 s6, s0, s5
+; GFX9-NEXT: s_lshr_b32 s3, s1, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s6, s1
+; GFX9-NEXT: s_cselect_b32 s1, s6, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_lshl_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s2, s3, s4
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_xor_b32 s3, s1, s5
+; GFX9-NEXT: s_lshr_b32 s0, s0, s4
+; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s3, s2
+; GFX9-NEXT: s_cselect_b32 s2, s3, s2
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_movk_i32 s2, 0xff
+; GFX9-NEXT: s_lshr_b32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s1, s1, s2
+; GFX9-NEXT: s_and_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: s_mov_b32 s4, 0xffff
+; GFX10-NEXT: s_lshl_b32 s3, s0, s2
+; GFX10-NEXT: s_lshl_b32 s6, s1, s2
+; GFX10-NEXT: s_xor_b32 s5, s3, s4
+; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX10-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, 8
+; GFX10-NEXT: s_lshr_b32 s1, s1, 8
+; GFX10-NEXT: s_cmp_lt_u32 s5, s6
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s5, s5, s6
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_add_i32 s3, s3, s5
+; GFX10-NEXT: s_xor_b32 s4, s0, s4
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX10-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_lshr_b32 s3, s3, s2
+; GFX10-NEXT: s_cmp_lt_u32 s4, s1
+; GFX10-NEXT: s_cselect_b32 s1, s4, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_movk_i32 s1, 0xff
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s2
+; GFX10-NEXT: s_and_b32 s0, s0, s1
+; GFX10-NEXT: s_and_b32 s1, s3, s1
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_or_b32 s0, s1, s0
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
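
There is no packed i8 ALU support here, so <2 x i8> and <4 x i8> are fully
scalarized: each lane is extracted, run through the same shift/clamp/add
sequence, and re-packed with and/shift/or. A per-lane sketch of the semantics
(hypothetical helper, mirroring the GFX6 shift-by-24 path):

  #include <algorithm>
  #include <cstdint>

  uint16_t uaddsat_v2i8(uint16_t a, uint16_t b) {
    uint32_t out = 0;
    for (int lane = 0; lane < 2; ++lane) {
      uint32_t ah = ((a >> (8 * lane)) & 0xffu) << 24;
      uint32_t bh = ((b >> (8 * lane)) & 0xffu) << 24;
      uint32_t s = ah + std::min(~ah, bh);           // saturating i8 add
      out |= ((s >> 24) & 0xffu) << (8 * lane);      // repack the lane
    }
    return (uint16_t)out;
  }
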
+
+define i32 @v_uaddsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
+; GFX6-LABEL: v_uaddsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_xor_b32_e32 v8, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, v8, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v5
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v2, v5, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v6
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v3, v5, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v4
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 24, v7
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v3
+; GFX6-NEXT: v_min_u32_e32 v4, v5, v4
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 24, v3
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_xor_b32_e32 v8, s4, v0
+; GFX8-NEXT: v_min_u16_e32 v1, v8, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_xor_b32_e32 v1, s4, v3
+; GFX8-NEXT: v_min_u16_e32 v1, v1, v2
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v4
+; GFX8-NEXT: v_add_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX8-NEXT: v_xor_b32_e32 v4, s4, v2
+; GFX8-NEXT: v_min_u16_e32 v3, v4, v3
+; GFX8-NEXT: v_add_u16_e32 v2, v2, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v5
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_xor_b32_e32 v5, s4, v3
+; GFX8-NEXT: v_min_u16_e32 v4, v5, v4
+; GFX8-NEXT: v_add_u16_e32 v3, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_xor_b32_e32 v8, s4, v0
+; GFX9-NEXT: v_min_u16_e32 v1, v8, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_xor_b32_e32 v1, s4, v2
+; GFX9-NEXT: v_min_u16_e32 v1, v1, v5
+; GFX9-NEXT: v_add_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX9-NEXT: v_xor_b32_e32 v5, s4, v2
+; GFX9-NEXT: v_min_u16_e32 v3, v5, v3
+; GFX9-NEXT: v_add_u16_e32 v2, v2, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v4
+; GFX9-NEXT: v_xor_b32_e32 v5, s4, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_min_u16_e32 v4, v5, v4
+; GFX9-NEXT: v_lshrrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_add_u16_e32 v3, v3, v4
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v2, v3, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: s_mov_b32 s7, 0xffff
+; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b16_e64 v4, 8, v0
+; GFX10-NEXT: s_mov_b32 s5, 16
+; GFX10-NEXT: s_mov_b32 s6, 24
+; GFX10-NEXT: v_xor_b32_e32 v5, s7, v2
+; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s5, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v0, s6, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b16_e64 v7, 8, v1
+; GFX10-NEXT: v_xor_b32_e32 v8, s7, v4
+; GFX10-NEXT: v_min_u16_e64 v3, v5, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v5, s5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_xor_b32_e32 v11, s7, v6
+; GFX10-NEXT: v_lshrrev_b32_sdwa v1, s6, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_u16_e64 v7, v8, v7
+; GFX10-NEXT: v_add_nc_u16_e64 v2, v2, v3
+; GFX10-NEXT: v_xor_b32_e32 v3, s7, v0
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: v_min_u16_e64 v5, v11, v5
+; GFX10-NEXT: v_add_nc_u16_e64 v4, v4, v7
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_min_u16_e64 v1, v3, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_nc_u16_e64 v3, v6, v5
+; GFX10-NEXT: v_lshrrev_b16_e64 v4, 8, v4
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_and_b32_sdwa v1, v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_or_b32 v2, v4, s4, v2
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_or3_b32 v0, v2, v1, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
+
+define amdgpu_ps i32 @s_uaddsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
+; GFX6-LABEL: s_uaddsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshr_b32 s3, s0, 16
+; GFX6-NEXT: s_lshr_b32 s4, s0, 24
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshr_b32 s5, s1, 8
+; GFX6-NEXT: s_lshr_b32 s6, s1, 16
+; GFX6-NEXT: s_lshr_b32 s7, s1, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_not_b32 s8, s0
+; GFX6-NEXT: s_cmp_lt_u32 s8, s1
+; GFX6-NEXT: s_cselect_b32 s1, s8, s1
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_lshl_b32 s2, s5, 24
+; GFX6-NEXT: s_lshr_b32 s0, s0, 24
+; GFX6-NEXT: s_not_b32 s5, s1
+; GFX6-NEXT: s_cmp_lt_u32 s5, s2
+; GFX6-NEXT: s_cselect_b32 s2, s5, s2
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_lshr_b32 s1, s1, 24
+; GFX6-NEXT: s_lshl_b32 s3, s6, 24
+; GFX6-NEXT: s_not_b32 s5, s2
+; GFX6-NEXT: s_cmp_lt_u32 s5, s3
+; GFX6-NEXT: s_cselect_b32 s3, s5, s3
+; GFX6-NEXT: s_add_i32 s2, s2, s3
+; GFX6-NEXT: s_lshl_b32 s3, s4, 24
+; GFX6-NEXT: s_lshr_b32 s2, s2, 24
+; GFX6-NEXT: s_lshl_b32 s4, s7, 24
+; GFX6-NEXT: s_not_b32 s5, s3
+; GFX6-NEXT: s_cmp_lt_u32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshr_b32 s3, s3, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s3, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshr_b32 s3, s0, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 24
+; GFX8-NEXT: s_lshl_b32 s0, s0, s8
+; GFX8-NEXT: s_mov_b32 s9, 0xffff
+; GFX8-NEXT: s_xor_b32 s10, s0, s9
+; GFX8-NEXT: s_lshr_b32 s5, s1, 8
+; GFX8-NEXT: s_lshr_b32 s6, s1, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 24
+; GFX8-NEXT: s_lshl_b32 s1, s1, s8
+; GFX8-NEXT: s_bfe_u32 s10, s10, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s10, s1
+; GFX8-NEXT: s_cselect_b32 s1, s10, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_lshl_b32 s1, s2, s8
+; GFX8-NEXT: s_lshl_b32 s2, s5, s8
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_xor_b32 s5, s1, s9
+; GFX8-NEXT: s_lshr_b32 s0, s0, s8
+; GFX8-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s2
+; GFX8-NEXT: s_cselect_b32 s2, s5, s2
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: s_lshl_b32 s2, s3, s8
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s6, s8
+; GFX8-NEXT: s_xor_b32 s5, s2, s9
+; GFX8-NEXT: s_lshr_b32 s1, s1, s8
+; GFX8-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s3
+; GFX8-NEXT: s_cselect_b32 s3, s5, s3
+; GFX8-NEXT: s_add_i32 s2, s2, s3
+; GFX8-NEXT: s_lshl_b32 s3, s4, s8
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s7, s8
+; GFX8-NEXT: s_xor_b32 s5, s3, s9
+; GFX8-NEXT: s_lshr_b32 s2, s2, s8
+; GFX8-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s4
+; GFX8-NEXT: s_cselect_b32 s4, s5, s4
+; GFX8-NEXT: s_add_i32 s3, s3, s4
+; GFX8-NEXT: s_movk_i32 s4, 0xff
+; GFX8-NEXT: s_and_b32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s0, s0, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 8
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_lshr_b32 s3, s3, s8
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s3, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 24
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshr_b32 s3, s0, 16
+; GFX9-NEXT: s_lshr_b32 s4, s0, 24
+; GFX9-NEXT: s_lshl_b32 s0, s0, s8
+; GFX9-NEXT: s_mov_b32 s9, 0xffff
+; GFX9-NEXT: s_xor_b32 s10, s0, s9
+; GFX9-NEXT: s_lshr_b32 s5, s1, 8
+; GFX9-NEXT: s_lshr_b32 s6, s1, 16
+; GFX9-NEXT: s_lshr_b32 s7, s1, 24
+; GFX9-NEXT: s_lshl_b32 s1, s1, s8
+; GFX9-NEXT: s_bfe_u32 s10, s10, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s10, s1
+; GFX9-NEXT: s_cselect_b32 s1, s10, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_lshl_b32 s1, s2, s8
+; GFX9-NEXT: s_lshl_b32 s2, s5, s8
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_xor_b32 s5, s1, s9
+; GFX9-NEXT: s_lshr_b32 s0, s0, s8
+; GFX9-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s5, s2
+; GFX9-NEXT: s_cselect_b32 s2, s5, s2
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: s_lshl_b32 s2, s3, s8
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_lshl_b32 s3, s6, s8
+; GFX9-NEXT: s_xor_b32 s5, s2, s9
+; GFX9-NEXT: s_lshr_b32 s1, s1, s8
+; GFX9-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s5, s3
+; GFX9-NEXT: s_cselect_b32 s3, s5, s3
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: s_lshl_b32 s3, s4, s8
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX9-NEXT: s_lshl_b32 s4, s7, s8
+; GFX9-NEXT: s_xor_b32 s5, s3, s9
+; GFX9-NEXT: s_lshr_b32 s2, s2, s8
+; GFX9-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX9-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: s_and_b32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s0, s0, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 16
+; GFX9-NEXT: s_lshr_b32 s3, s3, s8
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s3, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 24
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s5, 8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s2, s0, 8
+; GFX10-NEXT: s_lshr_b32 s3, s0, 16
+; GFX10-NEXT: s_lshr_b32 s4, s0, 24
+; GFX10-NEXT: s_lshl_b32 s0, s0, s5
+; GFX10-NEXT: s_mov_b32 s7, 0xffff
+; GFX10-NEXT: s_lshl_b32 s10, s1, s5
+; GFX10-NEXT: s_xor_b32 s9, s0, s7
+; GFX10-NEXT: s_bfe_u32 s10, s10, 0x100000
+; GFX10-NEXT: s_bfe_u32 s9, s9, 0x100000
+; GFX10-NEXT: s_lshr_b32 s6, s1, 8
+; GFX10-NEXT: s_lshr_b32 s8, s1, 16
+; GFX10-NEXT: s_lshr_b32 s1, s1, 24
+; GFX10-NEXT: s_cmp_lt_u32 s9, s10
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s9, s9, s10
+; GFX10-NEXT: s_lshl_b32 s2, s2, s5
+; GFX10-NEXT: s_add_i32 s0, s0, s9
+; GFX10-NEXT: s_xor_b32 s9, s2, s7
+; GFX10-NEXT: s_lshl_b32 s6, s6, s5
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_bfe_u32 s9, s9, 0x100000
+; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s5
+; GFX10-NEXT: s_cmp_lt_u32 s9, s6
+; GFX10-NEXT: s_cselect_b32 s6, s9, s6
+; GFX10-NEXT: s_lshl_b32 s3, s3, s5
+; GFX10-NEXT: s_add_i32 s2, s2, s6
+; GFX10-NEXT: s_xor_b32 s6, s3, s7
+; GFX10-NEXT: s_lshl_b32 s8, s8, s5
+; GFX10-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX10-NEXT: s_bfe_u32 s8, s8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s2, s2, s5
+; GFX10-NEXT: s_cmp_lt_u32 s6, s8
+; GFX10-NEXT: s_cselect_b32 s6, s6, s8
+; GFX10-NEXT: s_lshl_b32 s4, s4, s5
+; GFX10-NEXT: s_add_i32 s3, s3, s6
+; GFX10-NEXT: s_xor_b32 s6, s4, s7
+; GFX10-NEXT: s_lshl_b32 s1, s1, s5
+; GFX10-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX10-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_lshr_b32 s3, s3, s5
+; GFX10-NEXT: s_cmp_lt_u32 s6, s1
+; GFX10-NEXT: s_cselect_b32 s1, s6, s1
+; GFX10-NEXT: s_add_i32 s4, s4, s1
+; GFX10-NEXT: s_bfe_u32 s1, s4, 0x100000
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: s_lshr_b32 s1, s1, s5
+; GFX10-NEXT: s_and_b32 s2, s2, s4
+; GFX10-NEXT: s_and_b32 s0, s0, s4
+; GFX10-NEXT: s_lshl_b32 s2, s2, 8
+; GFX10-NEXT: s_and_b32 s3, s3, s4
+; GFX10-NEXT: s_and_b32 s1, s1, s4
+; GFX10-NEXT: s_or_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s2, s3, 16
+; GFX10-NEXT: s_lshl_b32 s1, s1, 24
+; GFX10-NEXT: s_or_b32 s0, s0, s2
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
+
+define i24 @v_uaddsat_i24(i24 %lhs, i24 %rhs) {
+; GFX6-LABEL: v_uaddsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX10-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i24 @llvm.uadd.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
+
+define amdgpu_ps i24 @s_uaddsat_i24(i24 inreg %lhs, i24 inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_not_b32 s2, s0
+; GFX6-NEXT: s_cmp_lt_u32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 8
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshl_b32 s0, s0, 8
+; GFX8-NEXT: s_lshl_b32 s1, s1, 8
+; GFX8-NEXT: s_not_b32 s2, s0
+; GFX8-NEXT: s_cmp_lt_u32 s2, s1
+; GFX8-NEXT: s_cselect_b32 s1, s2, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_lshr_b32 s0, s0, 8
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_lshl_b32 s0, s0, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_not_b32 s2, s0
+; GFX9-NEXT: s_cmp_lt_u32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_lshr_b32 s0, s0, 8
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_lshl_b32 s0, s0, 8
+; GFX10-NEXT: s_lshl_b32 s1, s1, 8
+; GFX10-NEXT: s_not_b32 s2, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_lshr_b32 s0, s0, 8
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i24 @llvm.uadd.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
+
+define i32 @v_uaddsat_i32(i32 %lhs, i32 %rhs) {
+; GFX6-LABEL: v_uaddsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i32 @llvm.uadd.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
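
At a legal width the lowering degenerates to exactly three instructions with
no shifts; every narrower test above is a shifted variant of this pattern. In
generic MIR terms (illustrative, register names invented):

  %not:_(s32) = G_XOR %lhs, -1
  %min:_(s32) = G_UMIN %not, %rhs
  %res:_(s32) = G_ADD %lhs, %min
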
+
+define amdgpu_ps i32 @s_uaddsat_i32(i32 inreg %lhs, i32 inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_not_b32 s2, s0
+; GFX6-NEXT: s_cmp_lt_u32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_not_b32 s2, s0
+; GFX8-NEXT: s_cmp_lt_u32 s2, s1
+; GFX8-NEXT: s_cselect_b32 s1, s2, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_not_b32 s2, s0
+; GFX9-NEXT: s_cmp_lt_u32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_not_b32 s2, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.uadd.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
+
+define amdgpu_ps float @uaddsat_i32_sv(i32 inreg %lhs, i32 %rhs) {
+; GFX6-LABEL: uaddsat_i32_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_not_b32 s1, s0
+; GFX6-NEXT: v_min_u32_e32 v0, s1, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i32_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_not_b32 s1, s0
+; GFX8-NEXT: v_min_u32_e32 v0, s1, v0
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i32_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_not_b32 s1, s0
+; GFX9-NEXT: v_min_u32_e32 v0, s1, v0
+; GFX9-NEXT: v_add_u32_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i32_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_not_b32 s1, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v0, s1, v0
+; GFX10-NEXT: v_add_nc_u32_e32 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.uadd.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @uaddsat_i32_vs(i32 %lhs, i32 inreg %rhs) {
+; GFX6-LABEL: uaddsat_i32_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_xor_b32_e32 v1, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, s0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i32_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_xor_b32_e32 v1, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v1, s0, v1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i32_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_xor_b32_e32 v1, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v1, s0, v1
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i32_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_xor_b32_e32 v1, -1, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v1, s0, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.uadd.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
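
The sv/vs variants check register-bank assignment rather than the lowering
itself: with one uniform operand the operation still executes on the VALU,
with the SGPR source folded directly into v_min_u32/v_add as a scalar operand
(and the not done on the SALU when the uniform value is the one being
inverted).
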
+
+define <2 x i32> @v_uaddsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; GFX6-LABEL: v_uaddsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_xor_b32_e32 v4, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v2, v4, v2
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v2, v2, v3
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v4, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v2, v4, v2
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_xor_b32_e32 v2, -1, v1
+; GFX8-NEXT: v_min_u32_e32 v2, v2, v3
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v4, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v2, v4, v2
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v2, -1, v1
+; GFX9-NEXT: v_min_u32_e32 v2, v2, v3
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_xor_b32_e32 v4, -1, v0
+; GFX10-NEXT: v_xor_b32_e32 v5, -1, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v2, v4, v2
+; GFX10-NEXT: v_min_u32_e32 v3, v5, v3
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
+define amdgpu_ps <2 x i32> @s_uaddsat_v2i32(<2 x i32> inreg %lhs, <2 x i32> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_not_b32 s4, s0
+; GFX6-NEXT: s_cmp_lt_u32 s4, s2
+; GFX6-NEXT: s_cselect_b32 s2, s4, s2
+; GFX6-NEXT: s_add_i32 s0, s0, s2
+; GFX6-NEXT: s_not_b32 s2, s1
+; GFX6-NEXT: s_cmp_lt_u32 s2, s3
+; GFX6-NEXT: s_cselect_b32 s2, s2, s3
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_not_b32 s4, s0
+; GFX8-NEXT: s_cmp_lt_u32 s4, s2
+; GFX8-NEXT: s_cselect_b32 s2, s4, s2
+; GFX8-NEXT: s_add_i32 s0, s0, s2
+; GFX8-NEXT: s_not_b32 s2, s1
+; GFX8-NEXT: s_cmp_lt_u32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_not_b32 s4, s0
+; GFX9-NEXT: s_cmp_lt_u32 s4, s2
+; GFX9-NEXT: s_cselect_b32 s2, s4, s2
+; GFX9-NEXT: s_add_i32 s0, s0, s2
+; GFX9-NEXT: s_not_b32 s2, s1
+; GFX9-NEXT: s_cmp_lt_u32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_not_b32 s4, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s4, s2
+; GFX10-NEXT: s_cselect_b32 s2, s4, s2
+; GFX10-NEXT: s_not_b32 s4, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s2
+; GFX10-NEXT: s_cmp_lt_u32 s4, s3
+; GFX10-NEXT: s_cselect_b32 s2, s4, s3
+; GFX10-NEXT: s_add_i32 s1, s1, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @v_uaddsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
+; GFX6-LABEL: v_uaddsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_xor_b32_e32 v6, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v3, v6, v3
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT: v_xor_b32_e32 v3, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v3, v3, v4
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT: v_xor_b32_e32 v3, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v3, v3, v5
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v6, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v3, v6, v3
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v3
+; GFX8-NEXT: v_xor_b32_e32 v3, -1, v1
+; GFX8-NEXT: v_min_u32_e32 v3, v3, v4
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT: v_xor_b32_e32 v3, -1, v2
+; GFX8-NEXT: v_min_u32_e32 v3, v3, v5
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v6, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v3, v6, v3
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v3
+; GFX9-NEXT: v_xor_b32_e32 v3, -1, v1
+; GFX9-NEXT: v_min_u32_e32 v3, v3, v4
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_xor_b32_e32 v3, -1, v2
+; GFX9-NEXT: v_min_u32_e32 v3, v3, v5
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_xor_b32_e32 v6, -1, v0
+; GFX10-NEXT: v_xor_b32_e32 v7, -1, v1
+; GFX10-NEXT: v_xor_b32_e32 v8, -1, v2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v3, v6, v3
+; GFX10-NEXT: v_min_u32_e32 v4, v7, v4
+; GFX10-NEXT: v_min_u32_e32 v5, v8, v5
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v3
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v4
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <3 x i32> @llvm.uadd.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define amdgpu_ps <3 x i32> @s_uaddsat_v3i32(<3 x i32> inreg %lhs, <3 x i32> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_not_b32 s6, s0
+; GFX6-NEXT: s_cmp_lt_u32 s6, s3
+; GFX6-NEXT: s_cselect_b32 s3, s6, s3
+; GFX6-NEXT: s_add_i32 s0, s0, s3
+; GFX6-NEXT: s_not_b32 s3, s1
+; GFX6-NEXT: s_cmp_lt_u32 s3, s4
+; GFX6-NEXT: s_cselect_b32 s3, s3, s4
+; GFX6-NEXT: s_add_i32 s1, s1, s3
+; GFX6-NEXT: s_not_b32 s3, s2
+; GFX6-NEXT: s_cmp_lt_u32 s3, s5
+; GFX6-NEXT: s_cselect_b32 s3, s3, s5
+; GFX6-NEXT: s_add_i32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_not_b32 s6, s0
+; GFX8-NEXT: s_cmp_lt_u32 s6, s3
+; GFX8-NEXT: s_cselect_b32 s3, s6, s3
+; GFX8-NEXT: s_add_i32 s0, s0, s3
+; GFX8-NEXT: s_not_b32 s3, s1
+; GFX8-NEXT: s_cmp_lt_u32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s1, s1, s3
+; GFX8-NEXT: s_not_b32 s3, s2
+; GFX8-NEXT: s_cmp_lt_u32 s3, s5
+; GFX8-NEXT: s_cselect_b32 s3, s3, s5
+; GFX8-NEXT: s_add_i32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_not_b32 s6, s0
+; GFX9-NEXT: s_cmp_lt_u32 s6, s3
+; GFX9-NEXT: s_cselect_b32 s3, s6, s3
+; GFX9-NEXT: s_add_i32 s0, s0, s3
+; GFX9-NEXT: s_not_b32 s3, s1
+; GFX9-NEXT: s_cmp_lt_u32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_add_i32 s1, s1, s3
+; GFX9-NEXT: s_not_b32 s3, s2
+; GFX9-NEXT: s_cmp_lt_u32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s3, s3, s5
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_not_b32 s6, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s6, s3
+; GFX10-NEXT: s_cselect_b32 s3, s6, s3
+; GFX10-NEXT: s_not_b32 s6, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s3
+; GFX10-NEXT: s_cmp_lt_u32 s6, s4
+; GFX10-NEXT: s_cselect_b32 s3, s6, s4
+; GFX10-NEXT: s_not_b32 s4, s2
+; GFX10-NEXT: s_add_i32 s1, s1, s3
+; GFX10-NEXT: s_cmp_lt_u32 s4, s5
+; GFX10-NEXT: s_cselect_b32 s3, s4, s5
+; GFX10-NEXT: s_add_i32 s2, s2, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <3 x i32> @llvm.uadd.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @v_uaddsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; GFX6-LABEL: v_uaddsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_xor_b32_e32 v8, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v4, v8, v4
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_xor_b32_e32 v4, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v4, v4, v5
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_xor_b32_e32 v4, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v4, v4, v6
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_xor_b32_e32 v4, -1, v3
+; GFX6-NEXT: v_min_u32_e32 v4, v4, v7
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v8, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v4, v8, v4
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v4, -1, v1
+; GFX8-NEXT: v_min_u32_e32 v4, v4, v5
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v4
+; GFX8-NEXT: v_xor_b32_e32 v4, -1, v2
+; GFX8-NEXT: v_min_u32_e32 v4, v4, v6
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v4
+; GFX8-NEXT: v_xor_b32_e32 v4, -1, v3
+; GFX8-NEXT: v_min_u32_e32 v4, v4, v7
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v8, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v4, v8, v4
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v4, -1, v1
+; GFX9-NEXT: v_min_u32_e32 v4, v4, v5
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v4
+; GFX9-NEXT: v_xor_b32_e32 v4, -1, v2
+; GFX9-NEXT: v_min_u32_e32 v4, v4, v6
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v4
+; GFX9-NEXT: v_xor_b32_e32 v4, -1, v3
+; GFX9-NEXT: v_min_u32_e32 v4, v4, v7
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_xor_b32_e32 v15, -1, v0
+; GFX10-NEXT: v_xor_b32_e32 v19, -1, v1
+; GFX10-NEXT: v_xor_b32_e32 v23, -1, v2
+; GFX10-NEXT: v_xor_b32_e32 v10, -1, v3
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v11, v15, v4
+; GFX10-NEXT: v_min_u32_e32 v15, v19, v5
+; GFX10-NEXT: v_min_u32_e32 v19, v23, v6
+; GFX10-NEXT: v_min_u32_e32 v6, v10, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v11
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v15
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v19
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
+
+define amdgpu_ps <4 x i32> @s_uaddsat_v4i32(<4 x i32> inreg %lhs, <4 x i32> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_not_b32 s8, s0
+; GFX6-NEXT: s_cmp_lt_u32 s8, s4
+; GFX6-NEXT: s_cselect_b32 s4, s8, s4
+; GFX6-NEXT: s_add_i32 s0, s0, s4
+; GFX6-NEXT: s_not_b32 s4, s1
+; GFX6-NEXT: s_cmp_lt_u32 s4, s5
+; GFX6-NEXT: s_cselect_b32 s4, s4, s5
+; GFX6-NEXT: s_add_i32 s1, s1, s4
+; GFX6-NEXT: s_not_b32 s4, s2
+; GFX6-NEXT: s_cmp_lt_u32 s4, s6
+; GFX6-NEXT: s_cselect_b32 s4, s4, s6
+; GFX6-NEXT: s_add_i32 s2, s2, s4
+; GFX6-NEXT: s_not_b32 s4, s3
+; GFX6-NEXT: s_cmp_lt_u32 s4, s7
+; GFX6-NEXT: s_cselect_b32 s4, s4, s7
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_not_b32 s8, s0
+; GFX8-NEXT: s_cmp_lt_u32 s8, s4
+; GFX8-NEXT: s_cselect_b32 s4, s8, s4
+; GFX8-NEXT: s_add_i32 s0, s0, s4
+; GFX8-NEXT: s_not_b32 s4, s1
+; GFX8-NEXT: s_cmp_lt_u32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s1, s1, s4
+; GFX8-NEXT: s_not_b32 s4, s2
+; GFX8-NEXT: s_cmp_lt_u32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_add_i32 s2, s2, s4
+; GFX8-NEXT: s_not_b32 s4, s3
+; GFX8-NEXT: s_cmp_lt_u32 s4, s7
+; GFX8-NEXT: s_cselect_b32 s4, s4, s7
+; GFX8-NEXT: s_add_i32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_not_b32 s8, s0
+; GFX9-NEXT: s_cmp_lt_u32 s8, s4
+; GFX9-NEXT: s_cselect_b32 s4, s8, s4
+; GFX9-NEXT: s_add_i32 s0, s0, s4
+; GFX9-NEXT: s_not_b32 s4, s1
+; GFX9-NEXT: s_cmp_lt_u32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_add_i32 s1, s1, s4
+; GFX9-NEXT: s_not_b32 s4, s2
+; GFX9-NEXT: s_cmp_lt_u32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_add_i32 s2, s2, s4
+; GFX9-NEXT: s_not_b32 s4, s3
+; GFX9-NEXT: s_cmp_lt_u32 s4, s7
+; GFX9-NEXT: s_cselect_b32 s4, s4, s7
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_not_b32 s8, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s8, s4
+; GFX10-NEXT: s_cselect_b32 s4, s8, s4
+; GFX10-NEXT: s_not_b32 s8, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s4
+; GFX10-NEXT: s_cmp_lt_u32 s8, s5
+; GFX10-NEXT: s_cselect_b32 s4, s8, s5
+; GFX10-NEXT: s_not_b32 s5, s2
+; GFX10-NEXT: s_add_i32 s1, s1, s4
+; GFX10-NEXT: s_cmp_lt_u32 s5, s6
+; GFX10-NEXT: s_cselect_b32 s4, s5, s6
+; GFX10-NEXT: s_not_b32 s5, s3
+; GFX10-NEXT: s_add_i32 s2, s2, s4
+; GFX10-NEXT: s_cmp_lt_u32 s5, s7
+; GFX10-NEXT: s_cselect_b32 s4, s5, s7
+; GFX10-NEXT: s_add_i32 s3, s3, s4
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
+
+define <5 x i32> @v_uaddsat_v5i32(<5 x i32> %lhs, <5 x i32> %rhs) {
+; GFX6-LABEL: v_uaddsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_xor_b32_e32 v10, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v5, v10, v5
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v5
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v5, v5, v6
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v5
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v5, v5, v7
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v5
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v3
+; GFX6-NEXT: v_min_u32_e32 v5, v5, v8
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v5
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v4
+; GFX6-NEXT: v_min_u32_e32 v5, v5, v9
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v5
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v10, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v5, v10, v5
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v5
+; GFX8-NEXT: v_xor_b32_e32 v5, -1, v1
+; GFX8-NEXT: v_min_u32_e32 v5, v5, v6
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v5
+; GFX8-NEXT: v_xor_b32_e32 v5, -1, v2
+; GFX8-NEXT: v_min_u32_e32 v5, v5, v7
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v5
+; GFX8-NEXT: v_xor_b32_e32 v5, -1, v3
+; GFX8-NEXT: v_min_u32_e32 v5, v5, v8
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v5
+; GFX8-NEXT: v_xor_b32_e32 v5, -1, v4
+; GFX8-NEXT: v_min_u32_e32 v5, v5, v9
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v10, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v5, v10, v5
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v5
+; GFX9-NEXT: v_xor_b32_e32 v5, -1, v1
+; GFX9-NEXT: v_min_u32_e32 v5, v5, v6
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_xor_b32_e32 v5, -1, v2
+; GFX9-NEXT: v_min_u32_e32 v5, v5, v7
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v5
+; GFX9-NEXT: v_xor_b32_e32 v5, -1, v3
+; GFX9-NEXT: v_min_u32_e32 v5, v5, v8
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v5
+; GFX9-NEXT: v_xor_b32_e32 v5, -1, v4
+; GFX9-NEXT: v_min_u32_e32 v5, v5, v9
+; GFX9-NEXT: v_add_u32_e32 v4, v4, v5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_xor_b32_e32 v10, -1, v0
+; GFX10-NEXT: v_xor_b32_e32 v11, -1, v1
+; GFX10-NEXT: v_xor_b32_e32 v12, -1, v2
+; GFX10-NEXT: v_xor_b32_e32 v13, -1, v3
+; GFX10-NEXT: v_xor_b32_e32 v14, -1, v4
+; GFX10-NEXT: v_min_u32_e32 v5, v10, v5
+; GFX10-NEXT: v_min_u32_e32 v6, v11, v6
+; GFX10-NEXT: v_min_u32_e32 v7, v12, v7
+; GFX10-NEXT: v_min_u32_e32 v8, v13, v8
+; GFX10-NEXT: v_min_u32_e32 v9, v14, v9
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v5
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v8
+; GFX10-NEXT: v_add_nc_u32_e32 v4, v4, v9
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <5 x i32> @llvm.uadd.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
+
+define amdgpu_ps <5 x i32> @s_uaddsat_v5i32(<5 x i32> inreg %lhs, <5 x i32> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_not_b32 s10, s0
+; GFX6-NEXT: s_cmp_lt_u32 s10, s5
+; GFX6-NEXT: s_cselect_b32 s5, s10, s5
+; GFX6-NEXT: s_add_i32 s0, s0, s5
+; GFX6-NEXT: s_not_b32 s5, s1
+; GFX6-NEXT: s_cmp_lt_u32 s5, s6
+; GFX6-NEXT: s_cselect_b32 s5, s5, s6
+; GFX6-NEXT: s_add_i32 s1, s1, s5
+; GFX6-NEXT: s_not_b32 s5, s2
+; GFX6-NEXT: s_cmp_lt_u32 s5, s7
+; GFX6-NEXT: s_cselect_b32 s5, s5, s7
+; GFX6-NEXT: s_add_i32 s2, s2, s5
+; GFX6-NEXT: s_not_b32 s5, s3
+; GFX6-NEXT: s_cmp_lt_u32 s5, s8
+; GFX6-NEXT: s_cselect_b32 s5, s5, s8
+; GFX6-NEXT: s_add_i32 s3, s3, s5
+; GFX6-NEXT: s_not_b32 s5, s4
+; GFX6-NEXT: s_cmp_lt_u32 s5, s9
+; GFX6-NEXT: s_cselect_b32 s5, s5, s9
+; GFX6-NEXT: s_add_i32 s4, s4, s5
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_not_b32 s10, s0
+; GFX8-NEXT: s_cmp_lt_u32 s10, s5
+; GFX8-NEXT: s_cselect_b32 s5, s10, s5
+; GFX8-NEXT: s_add_i32 s0, s0, s5
+; GFX8-NEXT: s_not_b32 s5, s1
+; GFX8-NEXT: s_cmp_lt_u32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_add_i32 s1, s1, s5
+; GFX8-NEXT: s_not_b32 s5, s2
+; GFX8-NEXT: s_cmp_lt_u32 s5, s7
+; GFX8-NEXT: s_cselect_b32 s5, s5, s7
+; GFX8-NEXT: s_add_i32 s2, s2, s5
+; GFX8-NEXT: s_not_b32 s5, s3
+; GFX8-NEXT: s_cmp_lt_u32 s5, s8
+; GFX8-NEXT: s_cselect_b32 s5, s5, s8
+; GFX8-NEXT: s_add_i32 s3, s3, s5
+; GFX8-NEXT: s_not_b32 s5, s4
+; GFX8-NEXT: s_cmp_lt_u32 s5, s9
+; GFX8-NEXT: s_cselect_b32 s5, s5, s9
+; GFX8-NEXT: s_add_i32 s4, s4, s5
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_not_b32 s10, s0
+; GFX9-NEXT: s_cmp_lt_u32 s10, s5
+; GFX9-NEXT: s_cselect_b32 s5, s10, s5
+; GFX9-NEXT: s_add_i32 s0, s0, s5
+; GFX9-NEXT: s_not_b32 s5, s1
+; GFX9-NEXT: s_cmp_lt_u32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_add_i32 s1, s1, s5
+; GFX9-NEXT: s_not_b32 s5, s2
+; GFX9-NEXT: s_cmp_lt_u32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s5, s5, s7
+; GFX9-NEXT: s_add_i32 s2, s2, s5
+; GFX9-NEXT: s_not_b32 s5, s3
+; GFX9-NEXT: s_cmp_lt_u32 s5, s8
+; GFX9-NEXT: s_cselect_b32 s5, s5, s8
+; GFX9-NEXT: s_add_i32 s3, s3, s5
+; GFX9-NEXT: s_not_b32 s5, s4
+; GFX9-NEXT: s_cmp_lt_u32 s5, s9
+; GFX9-NEXT: s_cselect_b32 s5, s5, s9
+; GFX9-NEXT: s_add_i32 s4, s4, s5
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_not_b32 s10, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s10, s5
+; GFX10-NEXT: s_cselect_b32 s5, s10, s5
+; GFX10-NEXT: s_not_b32 s10, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s5
+; GFX10-NEXT: s_cmp_lt_u32 s10, s6
+; GFX10-NEXT: s_cselect_b32 s5, s10, s6
+; GFX10-NEXT: s_not_b32 s6, s2
+; GFX10-NEXT: s_add_i32 s1, s1, s5
+; GFX10-NEXT: s_cmp_lt_u32 s6, s7
+; GFX10-NEXT: s_cselect_b32 s5, s6, s7
+; GFX10-NEXT: s_not_b32 s6, s3
+; GFX10-NEXT: s_add_i32 s2, s2, s5
+; GFX10-NEXT: s_cmp_lt_u32 s6, s8
+; GFX10-NEXT: s_cselect_b32 s5, s6, s8
+; GFX10-NEXT: s_not_b32 s6, s4
+; GFX10-NEXT: s_add_i32 s3, s3, s5
+; GFX10-NEXT: s_cmp_lt_u32 s6, s9
+; GFX10-NEXT: s_cselect_b32 s5, s6, s9
+; GFX10-NEXT: s_add_i32 s4, s4, s5
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <5 x i32> @llvm.uadd.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
+
+define <16 x i32> @v_uaddsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
+; GFX6-LABEL: v_uaddsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_xor_b32_e32 v32, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v16, v32, v16
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v17
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v18
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v3
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v19
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v4
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v20
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v5
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v21
+; GFX6-NEXT: v_add_i32_e32 v5, vcc, v5, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v6
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v22
+; GFX6-NEXT: v_add_i32_e32 v6, vcc, v6, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v7
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v23
+; GFX6-NEXT: v_add_i32_e32 v7, vcc, v7, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v8
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v24
+; GFX6-NEXT: v_add_i32_e32 v8, vcc, v8, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v9
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v25
+; GFX6-NEXT: v_add_i32_e32 v9, vcc, v9, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v10
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v26
+; GFX6-NEXT: v_add_i32_e32 v10, vcc, v10, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v11
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v27
+; GFX6-NEXT: v_add_i32_e32 v11, vcc, v11, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v12
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v28
+; GFX6-NEXT: v_add_i32_e32 v12, vcc, v12, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v13
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v29
+; GFX6-NEXT: v_add_i32_e32 v13, vcc, v13, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v14
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v30
+; GFX6-NEXT: v_add_i32_e32 v14, vcc, v14, v16
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v15
+; GFX6-NEXT: v_min_u32_e32 v16, v16, v31
+; GFX6-NEXT: v_add_i32_e32 v15, vcc, v15, v16
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v32, -1, v0
+; GFX8-NEXT: v_min_u32_e32 v16, v32, v16
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v1
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v17
+; GFX8-NEXT: v_add_u32_e32 v1, vcc, v1, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v2
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v18
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v3
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v19
+; GFX8-NEXT: v_add_u32_e32 v3, vcc, v3, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v4
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v20
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v5
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v21
+; GFX8-NEXT: v_add_u32_e32 v5, vcc, v5, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v6
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v22
+; GFX8-NEXT: v_add_u32_e32 v6, vcc, v6, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v7
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v23
+; GFX8-NEXT: v_add_u32_e32 v7, vcc, v7, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v8
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v24
+; GFX8-NEXT: v_add_u32_e32 v8, vcc, v8, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v9
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v25
+; GFX8-NEXT: v_add_u32_e32 v9, vcc, v9, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v10
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v26
+; GFX8-NEXT: v_add_u32_e32 v10, vcc, v10, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v11
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v27
+; GFX8-NEXT: v_add_u32_e32 v11, vcc, v11, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v12
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v28
+; GFX8-NEXT: v_add_u32_e32 v12, vcc, v12, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v13
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v29
+; GFX8-NEXT: v_add_u32_e32 v13, vcc, v13, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v14
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v30
+; GFX8-NEXT: v_add_u32_e32 v14, vcc, v14, v16
+; GFX8-NEXT: v_xor_b32_e32 v16, -1, v15
+; GFX8-NEXT: v_min_u32_e32 v16, v16, v31
+; GFX8-NEXT: v_add_u32_e32 v15, vcc, v15, v16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v32, -1, v0
+; GFX9-NEXT: v_min_u32_e32 v16, v32, v16
+; GFX9-NEXT: v_add_u32_e32 v0, v0, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v1
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v17
+; GFX9-NEXT: v_add_u32_e32 v1, v1, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v2
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v18
+; GFX9-NEXT: v_add_u32_e32 v2, v2, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v3
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v19
+; GFX9-NEXT: v_add_u32_e32 v3, v3, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v4
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v20
+; GFX9-NEXT: v_add_u32_e32 v4, v4, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v5
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v21
+; GFX9-NEXT: v_add_u32_e32 v5, v5, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v6
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v22
+; GFX9-NEXT: v_add_u32_e32 v6, v6, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v7
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v23
+; GFX9-NEXT: v_add_u32_e32 v7, v7, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v8
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v24
+; GFX9-NEXT: v_add_u32_e32 v8, v8, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v9
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v25
+; GFX9-NEXT: v_add_u32_e32 v9, v9, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v10
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v26
+; GFX9-NEXT: v_add_u32_e32 v10, v10, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v11
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v27
+; GFX9-NEXT: v_add_u32_e32 v11, v11, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v12
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v28
+; GFX9-NEXT: v_add_u32_e32 v12, v12, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v13
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v29
+; GFX9-NEXT: v_add_u32_e32 v13, v13, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v14
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v30
+; GFX9-NEXT: v_add_u32_e32 v14, v14, v16
+; GFX9-NEXT: v_xor_b32_e32 v16, -1, v15
+; GFX9-NEXT: v_min_u32_e32 v16, v16, v31
+; GFX9-NEXT: v_add_u32_e32 v15, v15, v16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_xor_b32_e32 v35, -1, v0
+; GFX10-NEXT: v_xor_b32_e32 v32, -1, v2
+; GFX10-NEXT: v_xor_b32_e32 v33, -1, v3
+; GFX10-NEXT: v_xor_b32_e32 v34, -1, v4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v35, v35, v16
+; GFX10-NEXT: v_xor_b32_e32 v16, -1, v1
+; GFX10-NEXT: v_add_nc_u32_e32 v0, v0, v35
+; GFX10-NEXT: v_xor_b32_e32 v35, -1, v5
+; GFX10-NEXT: v_min_u32_e32 v16, v16, v17
+; GFX10-NEXT: v_min_u32_e32 v17, v32, v18
+; GFX10-NEXT: v_min_u32_e32 v18, v33, v19
+; GFX10-NEXT: v_min_u32_e32 v19, v34, v20
+; GFX10-NEXT: v_min_u32_e32 v20, v35, v21
+; GFX10-NEXT: v_add_nc_u32_e32 v1, v1, v16
+; GFX10-NEXT: v_xor_b32_e32 v16, -1, v6
+; GFX10-NEXT: v_add_nc_u32_e32 v2, v2, v17
+; GFX10-NEXT: v_xor_b32_e32 v17, -1, v7
+; GFX10-NEXT: v_add_nc_u32_e32 v3, v3, v18
+; GFX10-NEXT: v_xor_b32_e32 v18, -1, v8
+; GFX10-NEXT: v_add_nc_u32_e32 v4, v4, v19
+; GFX10-NEXT: v_xor_b32_e32 v19, -1, v9
+; GFX10-NEXT: v_add_nc_u32_e32 v5, v5, v20
+; GFX10-NEXT: v_xor_b32_e32 v20, -1, v10
+; GFX10-NEXT: v_min_u32_e32 v16, v16, v22
+; GFX10-NEXT: v_min_u32_e32 v17, v17, v23
+; GFX10-NEXT: v_min_u32_e32 v18, v18, v24
+; GFX10-NEXT: v_min_u32_e32 v19, v19, v25
+; GFX10-NEXT: v_min_u32_e32 v20, v20, v26
+; GFX10-NEXT: v_add_nc_u32_e32 v6, v6, v16
+; GFX10-NEXT: v_xor_b32_e32 v16, -1, v11
+; GFX10-NEXT: v_add_nc_u32_e32 v7, v7, v17
+; GFX10-NEXT: v_xor_b32_e32 v17, -1, v12
+; GFX10-NEXT: v_add_nc_u32_e32 v8, v8, v18
+; GFX10-NEXT: v_xor_b32_e32 v18, -1, v13
+; GFX10-NEXT: v_add_nc_u32_e32 v9, v9, v19
+; GFX10-NEXT: v_xor_b32_e32 v19, -1, v14
+; GFX10-NEXT: v_add_nc_u32_e32 v10, v10, v20
+; GFX10-NEXT: v_xor_b32_e32 v20, -1, v15
+; GFX10-NEXT: v_min_u32_e32 v16, v16, v27
+; GFX10-NEXT: v_min_u32_e32 v17, v17, v28
+; GFX10-NEXT: v_min_u32_e32 v18, v18, v29
+; GFX10-NEXT: v_min_u32_e32 v19, v19, v30
+; GFX10-NEXT: v_min_u32_e32 v20, v20, v31
+; GFX10-NEXT: v_add_nc_u32_e32 v11, v11, v16
+; GFX10-NEXT: v_add_nc_u32_e32 v12, v12, v17
+; GFX10-NEXT: v_add_nc_u32_e32 v13, v13, v18
+; GFX10-NEXT: v_add_nc_u32_e32 v14, v14, v19
+; GFX10-NEXT: v_add_nc_u32_e32 v15, v15, v20
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
+
+define amdgpu_ps <16 x i32> @s_uaddsat_v16i32(<16 x i32> inreg %lhs, <16 x i32> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_not_b32 s32, s0
+; GFX6-NEXT: s_cmp_lt_u32 s32, s16
+; GFX6-NEXT: s_cselect_b32 s16, s32, s16
+; GFX6-NEXT: s_add_i32 s0, s0, s16
+; GFX6-NEXT: s_not_b32 s16, s1
+; GFX6-NEXT: s_cmp_lt_u32 s16, s17
+; GFX6-NEXT: s_cselect_b32 s16, s16, s17
+; GFX6-NEXT: s_add_i32 s1, s1, s16
+; GFX6-NEXT: s_not_b32 s16, s2
+; GFX6-NEXT: s_cmp_lt_u32 s16, s18
+; GFX6-NEXT: s_cselect_b32 s16, s16, s18
+; GFX6-NEXT: s_add_i32 s2, s2, s16
+; GFX6-NEXT: s_not_b32 s16, s3
+; GFX6-NEXT: s_cmp_lt_u32 s16, s19
+; GFX6-NEXT: s_cselect_b32 s16, s16, s19
+; GFX6-NEXT: s_add_i32 s3, s3, s16
+; GFX6-NEXT: s_not_b32 s16, s4
+; GFX6-NEXT: s_cmp_lt_u32 s16, s20
+; GFX6-NEXT: s_cselect_b32 s16, s16, s20
+; GFX6-NEXT: s_add_i32 s4, s4, s16
+; GFX6-NEXT: s_not_b32 s16, s5
+; GFX6-NEXT: s_cmp_lt_u32 s16, s21
+; GFX6-NEXT: s_cselect_b32 s16, s16, s21
+; GFX6-NEXT: s_add_i32 s5, s5, s16
+; GFX6-NEXT: s_not_b32 s16, s6
+; GFX6-NEXT: s_cmp_lt_u32 s16, s22
+; GFX6-NEXT: s_cselect_b32 s16, s16, s22
+; GFX6-NEXT: s_add_i32 s6, s6, s16
+; GFX6-NEXT: s_not_b32 s16, s7
+; GFX6-NEXT: s_cmp_lt_u32 s16, s23
+; GFX6-NEXT: s_cselect_b32 s16, s16, s23
+; GFX6-NEXT: s_add_i32 s7, s7, s16
+; GFX6-NEXT: s_not_b32 s16, s8
+; GFX6-NEXT: s_cmp_lt_u32 s16, s24
+; GFX6-NEXT: s_cselect_b32 s16, s16, s24
+; GFX6-NEXT: s_add_i32 s8, s8, s16
+; GFX6-NEXT: s_not_b32 s16, s9
+; GFX6-NEXT: s_cmp_lt_u32 s16, s25
+; GFX6-NEXT: s_cselect_b32 s16, s16, s25
+; GFX6-NEXT: s_add_i32 s9, s9, s16
+; GFX6-NEXT: s_not_b32 s16, s10
+; GFX6-NEXT: s_cmp_lt_u32 s16, s26
+; GFX6-NEXT: s_cselect_b32 s16, s16, s26
+; GFX6-NEXT: s_add_i32 s10, s10, s16
+; GFX6-NEXT: s_not_b32 s16, s11
+; GFX6-NEXT: s_cmp_lt_u32 s16, s27
+; GFX6-NEXT: s_cselect_b32 s16, s16, s27
+; GFX6-NEXT: s_add_i32 s11, s11, s16
+; GFX6-NEXT: s_not_b32 s16, s12
+; GFX6-NEXT: s_cmp_lt_u32 s16, s28
+; GFX6-NEXT: s_cselect_b32 s16, s16, s28
+; GFX6-NEXT: s_add_i32 s12, s12, s16
+; GFX6-NEXT: s_not_b32 s16, s13
+; GFX6-NEXT: s_cmp_lt_u32 s16, s29
+; GFX6-NEXT: s_cselect_b32 s16, s16, s29
+; GFX6-NEXT: s_add_i32 s13, s13, s16
+; GFX6-NEXT: s_not_b32 s16, s14
+; GFX6-NEXT: s_cmp_lt_u32 s16, s30
+; GFX6-NEXT: s_cselect_b32 s16, s16, s30
+; GFX6-NEXT: s_add_i32 s14, s14, s16
+; GFX6-NEXT: s_not_b32 s16, s15
+; GFX6-NEXT: s_cmp_lt_u32 s16, s31
+; GFX6-NEXT: s_cselect_b32 s16, s16, s31
+; GFX6-NEXT: s_add_i32 s15, s15, s16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_not_b32 s32, s0
+; GFX8-NEXT: s_cmp_lt_u32 s32, s16
+; GFX8-NEXT: s_cselect_b32 s16, s32, s16
+; GFX8-NEXT: s_add_i32 s0, s0, s16
+; GFX8-NEXT: s_not_b32 s16, s1
+; GFX8-NEXT: s_cmp_lt_u32 s16, s17
+; GFX8-NEXT: s_cselect_b32 s16, s16, s17
+; GFX8-NEXT: s_add_i32 s1, s1, s16
+; GFX8-NEXT: s_not_b32 s16, s2
+; GFX8-NEXT: s_cmp_lt_u32 s16, s18
+; GFX8-NEXT: s_cselect_b32 s16, s16, s18
+; GFX8-NEXT: s_add_i32 s2, s2, s16
+; GFX8-NEXT: s_not_b32 s16, s3
+; GFX8-NEXT: s_cmp_lt_u32 s16, s19
+; GFX8-NEXT: s_cselect_b32 s16, s16, s19
+; GFX8-NEXT: s_add_i32 s3, s3, s16
+; GFX8-NEXT: s_not_b32 s16, s4
+; GFX8-NEXT: s_cmp_lt_u32 s16, s20
+; GFX8-NEXT: s_cselect_b32 s16, s16, s20
+; GFX8-NEXT: s_add_i32 s4, s4, s16
+; GFX8-NEXT: s_not_b32 s16, s5
+; GFX8-NEXT: s_cmp_lt_u32 s16, s21
+; GFX8-NEXT: s_cselect_b32 s16, s16, s21
+; GFX8-NEXT: s_add_i32 s5, s5, s16
+; GFX8-NEXT: s_not_b32 s16, s6
+; GFX8-NEXT: s_cmp_lt_u32 s16, s22
+; GFX8-NEXT: s_cselect_b32 s16, s16, s22
+; GFX8-NEXT: s_add_i32 s6, s6, s16
+; GFX8-NEXT: s_not_b32 s16, s7
+; GFX8-NEXT: s_cmp_lt_u32 s16, s23
+; GFX8-NEXT: s_cselect_b32 s16, s16, s23
+; GFX8-NEXT: s_add_i32 s7, s7, s16
+; GFX8-NEXT: s_not_b32 s16, s8
+; GFX8-NEXT: s_cmp_lt_u32 s16, s24
+; GFX8-NEXT: s_cselect_b32 s16, s16, s24
+; GFX8-NEXT: s_add_i32 s8, s8, s16
+; GFX8-NEXT: s_not_b32 s16, s9
+; GFX8-NEXT: s_cmp_lt_u32 s16, s25
+; GFX8-NEXT: s_cselect_b32 s16, s16, s25
+; GFX8-NEXT: s_add_i32 s9, s9, s16
+; GFX8-NEXT: s_not_b32 s16, s10
+; GFX8-NEXT: s_cmp_lt_u32 s16, s26
+; GFX8-NEXT: s_cselect_b32 s16, s16, s26
+; GFX8-NEXT: s_add_i32 s10, s10, s16
+; GFX8-NEXT: s_not_b32 s16, s11
+; GFX8-NEXT: s_cmp_lt_u32 s16, s27
+; GFX8-NEXT: s_cselect_b32 s16, s16, s27
+; GFX8-NEXT: s_add_i32 s11, s11, s16
+; GFX8-NEXT: s_not_b32 s16, s12
+; GFX8-NEXT: s_cmp_lt_u32 s16, s28
+; GFX8-NEXT: s_cselect_b32 s16, s16, s28
+; GFX8-NEXT: s_add_i32 s12, s12, s16
+; GFX8-NEXT: s_not_b32 s16, s13
+; GFX8-NEXT: s_cmp_lt_u32 s16, s29
+; GFX8-NEXT: s_cselect_b32 s16, s16, s29
+; GFX8-NEXT: s_add_i32 s13, s13, s16
+; GFX8-NEXT: s_not_b32 s16, s14
+; GFX8-NEXT: s_cmp_lt_u32 s16, s30
+; GFX8-NEXT: s_cselect_b32 s16, s16, s30
+; GFX8-NEXT: s_add_i32 s14, s14, s16
+; GFX8-NEXT: s_not_b32 s16, s15
+; GFX8-NEXT: s_cmp_lt_u32 s16, s31
+; GFX8-NEXT: s_cselect_b32 s16, s16, s31
+; GFX8-NEXT: s_add_i32 s15, s15, s16
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_not_b32 s32, s0
+; GFX9-NEXT: s_cmp_lt_u32 s32, s16
+; GFX9-NEXT: s_cselect_b32 s16, s32, s16
+; GFX9-NEXT: s_add_i32 s0, s0, s16
+; GFX9-NEXT: s_not_b32 s16, s1
+; GFX9-NEXT: s_cmp_lt_u32 s16, s17
+; GFX9-NEXT: s_cselect_b32 s16, s16, s17
+; GFX9-NEXT: s_add_i32 s1, s1, s16
+; GFX9-NEXT: s_not_b32 s16, s2
+; GFX9-NEXT: s_cmp_lt_u32 s16, s18
+; GFX9-NEXT: s_cselect_b32 s16, s16, s18
+; GFX9-NEXT: s_add_i32 s2, s2, s16
+; GFX9-NEXT: s_not_b32 s16, s3
+; GFX9-NEXT: s_cmp_lt_u32 s16, s19
+; GFX9-NEXT: s_cselect_b32 s16, s16, s19
+; GFX9-NEXT: s_add_i32 s3, s3, s16
+; GFX9-NEXT: s_not_b32 s16, s4
+; GFX9-NEXT: s_cmp_lt_u32 s16, s20
+; GFX9-NEXT: s_cselect_b32 s16, s16, s20
+; GFX9-NEXT: s_add_i32 s4, s4, s16
+; GFX9-NEXT: s_not_b32 s16, s5
+; GFX9-NEXT: s_cmp_lt_u32 s16, s21
+; GFX9-NEXT: s_cselect_b32 s16, s16, s21
+; GFX9-NEXT: s_add_i32 s5, s5, s16
+; GFX9-NEXT: s_not_b32 s16, s6
+; GFX9-NEXT: s_cmp_lt_u32 s16, s22
+; GFX9-NEXT: s_cselect_b32 s16, s16, s22
+; GFX9-NEXT: s_add_i32 s6, s6, s16
+; GFX9-NEXT: s_not_b32 s16, s7
+; GFX9-NEXT: s_cmp_lt_u32 s16, s23
+; GFX9-NEXT: s_cselect_b32 s16, s16, s23
+; GFX9-NEXT: s_add_i32 s7, s7, s16
+; GFX9-NEXT: s_not_b32 s16, s8
+; GFX9-NEXT: s_cmp_lt_u32 s16, s24
+; GFX9-NEXT: s_cselect_b32 s16, s16, s24
+; GFX9-NEXT: s_add_i32 s8, s8, s16
+; GFX9-NEXT: s_not_b32 s16, s9
+; GFX9-NEXT: s_cmp_lt_u32 s16, s25
+; GFX9-NEXT: s_cselect_b32 s16, s16, s25
+; GFX9-NEXT: s_add_i32 s9, s9, s16
+; GFX9-NEXT: s_not_b32 s16, s10
+; GFX9-NEXT: s_cmp_lt_u32 s16, s26
+; GFX9-NEXT: s_cselect_b32 s16, s16, s26
+; GFX9-NEXT: s_add_i32 s10, s10, s16
+; GFX9-NEXT: s_not_b32 s16, s11
+; GFX9-NEXT: s_cmp_lt_u32 s16, s27
+; GFX9-NEXT: s_cselect_b32 s16, s16, s27
+; GFX9-NEXT: s_add_i32 s11, s11, s16
+; GFX9-NEXT: s_not_b32 s16, s12
+; GFX9-NEXT: s_cmp_lt_u32 s16, s28
+; GFX9-NEXT: s_cselect_b32 s16, s16, s28
+; GFX9-NEXT: s_add_i32 s12, s12, s16
+; GFX9-NEXT: s_not_b32 s16, s13
+; GFX9-NEXT: s_cmp_lt_u32 s16, s29
+; GFX9-NEXT: s_cselect_b32 s16, s16, s29
+; GFX9-NEXT: s_add_i32 s13, s13, s16
+; GFX9-NEXT: s_not_b32 s16, s14
+; GFX9-NEXT: s_cmp_lt_u32 s16, s30
+; GFX9-NEXT: s_cselect_b32 s16, s16, s30
+; GFX9-NEXT: s_add_i32 s14, s14, s16
+; GFX9-NEXT: s_not_b32 s16, s15
+; GFX9-NEXT: s_cmp_lt_u32 s16, s31
+; GFX9-NEXT: s_cselect_b32 s16, s16, s31
+; GFX9-NEXT: s_add_i32 s15, s15, s16
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_not_b32 s46, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s46, s16
+; GFX10-NEXT: s_cselect_b32 s46, s46, s16
+; GFX10-NEXT: s_not_b32 s47, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s46
+; GFX10-NEXT: s_cmp_lt_u32 s47, s17
+; GFX10-NEXT: s_cselect_b32 s46, s47, s17
+; GFX10-NEXT: s_not_b32 s17, s2
+; GFX10-NEXT: s_add_i32 s1, s1, s46
+; GFX10-NEXT: s_cmp_lt_u32 s17, s18
+; GFX10-NEXT: s_cselect_b32 s16, s17, s18
+; GFX10-NEXT: s_not_b32 s17, s3
+; GFX10-NEXT: s_add_i32 s2, s2, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s19
+; GFX10-NEXT: s_cselect_b32 s16, s17, s19
+; GFX10-NEXT: s_not_b32 s17, s4
+; GFX10-NEXT: s_add_i32 s3, s3, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s20
+; GFX10-NEXT: s_cselect_b32 s16, s17, s20
+; GFX10-NEXT: s_not_b32 s17, s5
+; GFX10-NEXT: s_add_i32 s4, s4, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s21
+; GFX10-NEXT: s_cselect_b32 s16, s17, s21
+; GFX10-NEXT: s_not_b32 s17, s6
+; GFX10-NEXT: s_add_i32 s5, s5, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s22
+; GFX10-NEXT: s_cselect_b32 s16, s17, s22
+; GFX10-NEXT: s_not_b32 s17, s7
+; GFX10-NEXT: s_add_i32 s6, s6, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s23
+; GFX10-NEXT: s_cselect_b32 s16, s17, s23
+; GFX10-NEXT: s_not_b32 s17, s8
+; GFX10-NEXT: s_add_i32 s7, s7, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s24
+; GFX10-NEXT: s_cselect_b32 s16, s17, s24
+; GFX10-NEXT: s_not_b32 s17, s9
+; GFX10-NEXT: s_add_i32 s8, s8, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s25
+; GFX10-NEXT: s_cselect_b32 s16, s17, s25
+; GFX10-NEXT: s_not_b32 s17, s10
+; GFX10-NEXT: s_add_i32 s9, s9, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s26
+; GFX10-NEXT: s_cselect_b32 s16, s17, s26
+; GFX10-NEXT: s_not_b32 s17, s11
+; GFX10-NEXT: s_add_i32 s10, s10, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s27
+; GFX10-NEXT: s_cselect_b32 s16, s17, s27
+; GFX10-NEXT: s_not_b32 s17, s12
+; GFX10-NEXT: s_add_i32 s11, s11, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s28
+; GFX10-NEXT: s_cselect_b32 s16, s17, s28
+; GFX10-NEXT: s_not_b32 s17, s13
+; GFX10-NEXT: s_add_i32 s12, s12, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s29
+; GFX10-NEXT: s_cselect_b32 s16, s17, s29
+; GFX10-NEXT: s_not_b32 s17, s14
+; GFX10-NEXT: s_add_i32 s13, s13, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s30
+; GFX10-NEXT: s_cselect_b32 s16, s17, s30
+; GFX10-NEXT: s_not_b32 s17, s15
+; GFX10-NEXT: s_add_i32 s14, s14, s16
+; GFX10-NEXT: s_cmp_lt_u32 s17, s31
+; GFX10-NEXT: s_cselect_b32 s16, s17, s31
+; GFX10-NEXT: s_add_i32 s15, s15, s16
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
+
+define i16 @v_uaddsat_i16(i16 %lhs, i16 %rhs) {
+; GFX6-LABEL: v_uaddsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, v2, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX8-NEXT: v_min_u16_e32 v1, v2, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX9-NEXT: v_min_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_xor_b32_e32 v2, 0xffff, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i16 @llvm.uadd.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
+
+define amdgpu_ps i16 @s_uaddsat_i16(i16 inreg %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_not_b32 s2, s0
+; GFX6-NEXT: s_cmp_lt_u32 s2, s1
+; GFX6-NEXT: s_cselect_b32 s1, s2, s1
+; GFX6-NEXT: s_add_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_xor_b32 s2, s0, 0xffff
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s2, s1
+; GFX8-NEXT: s_cselect_b32 s1, s2, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_xor_b32 s2, s0, 0xffff
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_xor_b32 s2, s0, 0xffff
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.uadd.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
+
+define amdgpu_ps half @uaddsat_i16_sv(i16 inreg %lhs, i16 %rhs) {
+; GFX6-LABEL: uaddsat_i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_not_b32 s1, s0
+; GFX6-NEXT: v_min_u32_e32 v0, s1, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_xor_b32 s1, s0, 0xffff
+; GFX8-NEXT: v_min_u16_e32 v0, s1, v0
+; GFX8-NEXT: v_add_u16_e32 v0, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_xor_b32 s1, s0, 0xffff
+; GFX9-NEXT: v_min_u16_e32 v0, s1, v0
+; GFX9-NEXT: v_add_u16_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_xor_b32 s1, s0, 0xffff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u16_e64 v0, s1, v0
+; GFX10-NEXT: v_add_nc_u16_e64 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.uadd.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define amdgpu_ps half @uaddsat_i16_vs(i16 %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: uaddsat_i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_xor_b32_e32 v1, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v1, s0, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_xor_b32_e32 v1, 0xffff, v0
+; GFX8-NEXT: v_min_u16_e32 v1, s0, v1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_xor_b32_e32 v1, 0xffff, v0
+; GFX9-NEXT: v_min_u16_e32 v1, s0, v1
+; GFX9-NEXT: v_add_u16_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_xor_b32_e32 v1, 0xffff, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u16_e64 v1, v1, s0
+; GFX10-NEXT: v_add_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.uadd.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define <2 x i16> @v_uaddsat_v2i16(<2 x i16> %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: v_uaddsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_xor_b32_e32 v4, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v2, v4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX6-NEXT: v_xor_b32_e32 v3, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v2, v3, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: v_xor_b32_e32 v3, s4, v0
+; GFX8-NEXT: v_xor_b32_e32 v4, s4, v2
+; GFX8-NEXT: v_min_u16_e32 v3, v3, v1
+; GFX8-NEXT: v_min_u16_sdwa v1, v4, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v3
+; GFX8-NEXT: v_add_u16_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX9-NEXT: v_xor_b32_e32 v2, s4, v0
+; GFX9-NEXT: v_pk_min_u16 v1, v2, v1
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v2, s4, v0
+; GFX10-NEXT: v_pk_min_u16 v1, v2, v1
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ ret <2 x i16> %result
+}
+
+define amdgpu_ps i32 @s_uaddsat_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_not_b32 s4, s0
+; GFX6-NEXT: s_cmp_lt_u32 s4, s2
+; GFX6-NEXT: s_cselect_b32 s2, s4, s2
+; GFX6-NEXT: s_add_i32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s3, 16
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_not_b32 s3, s1
+; GFX6-NEXT: s_cmp_lt_u32 s3, s2
+; GFX6-NEXT: s_cselect_b32 s2, s3, s2
+; GFX6-NEXT: s_add_i32 s1, s1, s2
+; GFX6-NEXT: s_mov_b32 s2, 0xffff
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: s_xor_b32 s5, s0, s4
+; GFX8-NEXT: s_lshr_b32 s3, s1, 16
+; GFX8-NEXT: s_lshr_b32 s2, s0, 16
+; GFX8-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s1
+; GFX8-NEXT: s_cselect_b32 s1, s5, s1
+; GFX8-NEXT: s_add_i32 s0, s0, s1
+; GFX8-NEXT: s_xor_b32 s1, s2, s4
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_add_i32 s2, s2, s1
+; GFX8-NEXT: s_bfe_u32 s1, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, -1, -1
+; GFX9-NEXT: s_xor_b32 s2, s0, s2
+; GFX9-NEXT: s_mov_b32 s4, 0xffff
+; GFX9-NEXT: s_lshr_b32 s3, s2, 16
+; GFX9-NEXT: s_lshr_b32 s5, s1, 16
+; GFX9-NEXT: s_and_b32 s2, s2, s4
+; GFX9-NEXT: s_and_b32 s1, s1, s4
+; GFX9-NEXT: s_cmp_lt_u32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_cmp_lt_u32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s2, s3, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
+; GFX9-NEXT: s_lshr_b32 s2, s0, 16
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s1
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, -1, -1
+; GFX10-NEXT: s_mov_b32 s3, 0xffff
+; GFX10-NEXT: s_xor_b32 s2, s0, s2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_lshr_b32 s4, s2, 16
+; GFX10-NEXT: s_and_b32 s2, s2, s3
+; GFX10-NEXT: s_and_b32 s3, s1, s3
+; GFX10-NEXT: s_lshr_b32 s1, s1, 16
+; GFX10-NEXT: s_cmp_lt_u32 s2, s3
+; GFX10-NEXT: s_cselect_b32 s2, s2, s3
+; GFX10-NEXT: s_cmp_lt_u32 s4, s1
+; GFX10-NEXT: s_cselect_b32 s1, s4, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s2, s1
+; GFX10-NEXT: s_lshr_b32 s2, s0, 16
+; GFX10-NEXT: s_lshr_b32 s3, s1, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s1
+; GFX10-NEXT: s_add_i32 s2, s2, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to i32
+ ret i32 %cast
+}
+
+define amdgpu_ps float @uaddsat_v2i16_sv(<2 x i16> inreg %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: uaddsat_v2i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_not_b32 s2, s0
+; GFX6-NEXT: v_min_u32_e32 v0, s2, v0
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_not_b32 s1, s0
+; GFX6-NEXT: v_min_u32_e32 v1, s1, v1
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, s0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_v2i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s2, 0xffff
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: s_xor_b32 s3, s0, s2
+; GFX8-NEXT: s_xor_b32 s2, s1, s2
+; GFX8-NEXT: v_mov_b32_e32 v2, s2
+; GFX8-NEXT: v_min_u16_e32 v1, s3, v0
+; GFX8-NEXT: v_min_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_add_u16_e32 v1, s0, v1
+; GFX8-NEXT: v_add_u16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_v2i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, -1, -1
+; GFX9-NEXT: s_xor_b32 s1, s0, s1
+; GFX9-NEXT: v_pk_min_u16 v0, s1, v0
+; GFX9-NEXT: v_pk_add_u16 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_v2i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, -1, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_xor_b32 s1, s0, s1
+; GFX10-NEXT: v_pk_min_u16 v0, s1, v0
+; GFX10-NEXT: v_pk_add_u16 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @uaddsat_v2i16_vs(<2 x i16> %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: uaddsat_v2i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v2, s0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: v_xor_b32_e32 v2, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v2, s0, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_v2i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s2, 0xffff
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: v_xor_b32_e32 v2, s2, v0
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: v_xor_b32_e32 v3, s2, v1
+; GFX8-NEXT: v_min_u16_e32 v2, s0, v2
+; GFX8-NEXT: v_min_u16_e32 v3, s1, v3
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v2
+; GFX8-NEXT: v_add_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_v2i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, -1, -1
+; GFX9-NEXT: v_xor_b32_e32 v1, s1, v0
+; GFX9-NEXT: v_pk_min_u16 v1, v1, s0
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_v2i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, -1, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v1, s1, v0
+; GFX10-NEXT: v_pk_min_u16 v1, v1, s0
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
+
+; FIXME: v3i16 insert/extract
+; define <3 x i16> @v_uaddsat_v3i16(<3 x i16> %lhs, <3 x i16> %rhs) {
+; %result = call <3 x i16> @llvm.uadd.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+; define amdgpu_ps <3 x i16> @s_uaddsat_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs) {
+; %result = call <3 x i16> @llvm.uadd.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+define <2 x float> @v_uaddsat_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; GFX6-LABEL: v_uaddsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_xor_b32_e32 v8, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v4, v8, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v4, v5, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v4, v5, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v7
+; GFX6-NEXT: v_xor_b32_e32 v5, -1, v3
+; GFX6-NEXT: v_min_u32_e32 v4, v5, v4
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_xor_b32_e32 v6, s4, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX8-NEXT: v_xor_b32_e32 v7, s4, v4
+; GFX8-NEXT: v_min_u16_e32 v6, v6, v2
+; GFX8-NEXT: v_min_u16_sdwa v2, v7, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v7, s4, v1
+; GFX8-NEXT: v_xor_b32_e32 v8, s4, v5
+; GFX8-NEXT: v_min_u16_e32 v7, v7, v3
+; GFX8-NEXT: v_min_u16_sdwa v3, v8, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v6
+; GFX8-NEXT: v_add_u16_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v7
+; GFX8-NEXT: v_add_u16_sdwa v2, v5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX9-NEXT: v_xor_b32_e32 v4, s4, v0
+; GFX9-NEXT: v_pk_min_u16 v2, v4, v2
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX9-NEXT: v_xor_b32_e32 v2, s4, v1
+; GFX9-NEXT: v_pk_min_u16 v2, v2, v3
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v4, s4, v0
+; GFX10-NEXT: v_xor_b32_e32 v5, s4, v1
+; GFX10-NEXT: v_pk_min_u16 v2, v4, v2
+; GFX10-NEXT: v_pk_min_u16 v3, v5, v3
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v2
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x i32> @s_uaddsat_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_not_b32 s8, s0
+; GFX6-NEXT: s_cmp_lt_u32 s8, s4
+; GFX6-NEXT: s_cselect_b32 s4, s8, s4
+; GFX6-NEXT: s_add_i32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s4, s5, 16
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_not_b32 s5, s1
+; GFX6-NEXT: s_cmp_lt_u32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_add_i32 s1, s1, s4
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s4, s6, 16
+; GFX6-NEXT: s_not_b32 s5, s2
+; GFX6-NEXT: s_cmp_lt_u32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_add_i32 s2, s2, s4
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s4, s7, 16
+; GFX6-NEXT: s_not_b32 s5, s3
+; GFX6-NEXT: s_cmp_lt_u32 s5, s4
+; GFX6-NEXT: s_cselect_b32 s4, s5, s4
+; GFX6-NEXT: s_add_i32 s3, s3, s4
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_lshr_b32 s3, s3, 16
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_and_b32 s2, s3, s4
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s8, 0xffff
+; GFX8-NEXT: s_xor_b32 s9, s0, s8
+; GFX8-NEXT: s_lshr_b32 s6, s2, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 16
+; GFX8-NEXT: s_lshr_b32 s5, s1, 16
+; GFX8-NEXT: s_lshr_b32 s7, s3, 16
+; GFX8-NEXT: s_bfe_u32 s9, s9, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s9, s2
+; GFX8-NEXT: s_cselect_b32 s2, s9, s2
+; GFX8-NEXT: s_add_i32 s0, s0, s2
+; GFX8-NEXT: s_xor_b32 s2, s4, s8
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s2, s6
+; GFX8-NEXT: s_cselect_b32 s2, s2, s6
+; GFX8-NEXT: s_add_i32 s4, s4, s2
+; GFX8-NEXT: s_xor_b32 s2, s1, s8
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_add_i32 s1, s1, s2
+; GFX8-NEXT: s_xor_b32 s2, s5, s8
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s7, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s2, s3
+; GFX8-NEXT: s_cselect_b32 s2, s2, s3
+; GFX8-NEXT: s_add_i32 s5, s5, s2
+; GFX8-NEXT: s_bfe_u32 s2, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s2
+; GFX8-NEXT: s_bfe_u32 s2, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX9-NEXT: s_xor_b32 s5, s0, s4
+; GFX9-NEXT: s_mov_b32 s7, 0xffff
+; GFX9-NEXT: s_lshr_b32 s6, s5, 16
+; GFX9-NEXT: s_lshr_b32 s8, s2, 16
+; GFX9-NEXT: s_and_b32 s5, s5, s7
+; GFX9-NEXT: s_and_b32 s2, s2, s7
+; GFX9-NEXT: s_cmp_lt_u32 s5, s2
+; GFX9-NEXT: s_cselect_b32 s2, s5, s2
+; GFX9-NEXT: s_cmp_lt_u32 s6, s8
+; GFX9-NEXT: s_cselect_b32 s5, s6, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s5
+; GFX9-NEXT: s_lshr_b32 s5, s0, 16
+; GFX9-NEXT: s_lshr_b32 s6, s2, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s2
+; GFX9-NEXT: s_xor_b32 s2, s1, s4
+; GFX9-NEXT: s_add_i32 s5, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_and_b32 s2, s2, s7
+; GFX9-NEXT: s_and_b32 s3, s3, s7
+; GFX9-NEXT: s_cmp_lt_u32 s2, s3
+; GFX9-NEXT: s_cselect_b32 s2, s2, s3
+; GFX9-NEXT: s_cmp_lt_u32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s3, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_add_i32 s1, s1, s2
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: s_mov_b32 s6, 0xffff
+; GFX10-NEXT: s_xor_b32 s5, s0, s4
+; GFX10-NEXT: s_and_b32 s8, s2, s6
+; GFX10-NEXT: s_lshr_b32 s7, s5, 16
+; GFX10-NEXT: s_and_b32 s5, s5, s6
+; GFX10-NEXT: s_lshr_b32 s2, s2, 16
+; GFX10-NEXT: s_cmp_lt_u32 s5, s8
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s5, s5, s8
+; GFX10-NEXT: s_cmp_lt_u32 s7, s2
+; GFX10-NEXT: s_cselect_b32 s2, s7, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s5, s2
+; GFX10-NEXT: s_lshr_b32 s5, s0, 16
+; GFX10-NEXT: s_lshr_b32 s7, s2, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s2
+; GFX10-NEXT: s_xor_b32 s2, s1, s4
+; GFX10-NEXT: s_add_i32 s5, s5, s7
+; GFX10-NEXT: s_lshr_b32 s4, s2, 16
+; GFX10-NEXT: s_and_b32 s2, s2, s6
+; GFX10-NEXT: s_and_b32 s6, s3, s6
+; GFX10-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-NEXT: s_cmp_lt_u32 s2, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s5
+; GFX10-NEXT: s_cselect_b32 s2, s2, s6
+; GFX10-NEXT: s_cmp_lt_u32 s4, s3
+; GFX10-NEXT: s_cselect_b32 s3, s4, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX10-NEXT: s_lshr_b32 s3, s1, 16
+; GFX10-NEXT: s_lshr_b32 s4, s2, 16
+; GFX10-NEXT: s_add_i32 s1, s1, s2
+; GFX10-NEXT: s_add_i32 s3, s3, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x i32>
+ ret <2 x i32> %cast
+}
+
+; FIXME: v5i16 cases are not yet working
+; define <5 x i16> @v_uaddsat_v5i16(<5 x i16> %lhs, <5 x i16> %rhs) {
+; %result = call <5 x i16> @llvm.uadd.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+; define amdgpu_ps <5 x i16> @s_uaddsat_v5i16(<5 x i16> inreg %lhs, <5 x i16> inreg %rhs) {
+; %result = call <5 x i16> @llvm.uadd.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+define <3 x float> @v_uaddsat_v6i16(<6 x i16> %lhs, <6 x i16> %rhs) {
+; GFX6-LABEL: v_uaddsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_xor_b32_e32 v12, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v6, v12, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX6-NEXT: v_xor_b32_e32 v7, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v8
+; GFX6-NEXT: v_xor_b32_e32 v7, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX6-NEXT: v_xor_b32_e32 v7, -1, v3
+; GFX6-NEXT: v_min_u32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v10
+; GFX6-NEXT: v_xor_b32_e32 v7, -1, v4
+; GFX6-NEXT: v_min_u32_e32 v6, v7, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v6
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v11
+; GFX6-NEXT: v_xor_b32_e32 v7, -1, v5
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_min_u32_e32 v6, v7, v6
+; GFX6-NEXT: v_add_i32_e32 v5, vcc, v5, v6
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX8-NEXT: v_xor_b32_e32 v9, s4, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX8-NEXT: v_xor_b32_e32 v10, s4, v6
+; GFX8-NEXT: v_min_u16_e32 v9, v9, v3
+; GFX8-NEXT: v_min_u16_sdwa v3, v10, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v10, s4, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX8-NEXT: v_xor_b32_e32 v11, s4, v7
+; GFX8-NEXT: v_min_u16_e32 v10, v10, v4
+; GFX8-NEXT: v_min_u16_sdwa v4, v11, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v11, s4, v2
+; GFX8-NEXT: v_xor_b32_e32 v12, s4, v8
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v9
+; GFX8-NEXT: v_add_u16_sdwa v3, v6, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_e32 v11, v11, v5
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX8-NEXT: v_min_u16_sdwa v5, v12, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v10
+; GFX8-NEXT: v_add_u16_sdwa v3, v7, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_add_u16_e32 v2, v2, v11
+; GFX8-NEXT: v_add_u16_sdwa v3, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX9-NEXT: v_xor_b32_e32 v6, s4, v0
+; GFX9-NEXT: v_pk_min_u16 v3, v6, v3
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v3
+; GFX9-NEXT: v_xor_b32_e32 v3, s4, v1
+; GFX9-NEXT: v_pk_min_u16 v3, v3, v4
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v3
+; GFX9-NEXT: v_xor_b32_e32 v3, s4, v2
+; GFX9-NEXT: v_pk_min_u16 v3, v3, v5
+; GFX9-NEXT: v_pk_add_u16 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v6, s4, v0
+; GFX10-NEXT: v_xor_b32_e32 v7, s4, v1
+; GFX10-NEXT: v_xor_b32_e32 v8, s4, v2
+; GFX10-NEXT: v_pk_min_u16 v3, v6, v3
+; GFX10-NEXT: v_pk_min_u16 v4, v7, v4
+; GFX10-NEXT: v_pk_min_u16 v5, v8, v5
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v3
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v4
+; GFX10-NEXT: v_pk_add_u16 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <6 x i16> @llvm.uadd.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x float>
+ ret <3 x float> %cast
+}
+
+define amdgpu_ps <3 x i32> @s_uaddsat_v6i16(<6 x i16> inreg %lhs, <6 x i16> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_not_b32 s12, s0
+; GFX6-NEXT: s_cmp_lt_u32 s12, s6
+; GFX6-NEXT: s_cselect_b32 s6, s12, s6
+; GFX6-NEXT: s_add_i32 s0, s0, s6
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s6, s7, 16
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_not_b32 s7, s1
+; GFX6-NEXT: s_cmp_lt_u32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_add_i32 s1, s1, s6
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s6, s8, 16
+; GFX6-NEXT: s_not_b32 s7, s2
+; GFX6-NEXT: s_cmp_lt_u32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_add_i32 s2, s2, s6
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s6, s9, 16
+; GFX6-NEXT: s_not_b32 s7, s3
+; GFX6-NEXT: s_cmp_lt_u32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_add_i32 s3, s3, s6
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshr_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s6, s10, 16
+; GFX6-NEXT: s_not_b32 s7, s4
+; GFX6-NEXT: s_cmp_lt_u32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_add_i32 s4, s4, s6
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshr_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s6, s11, 16
+; GFX6-NEXT: s_not_b32 s7, s5
+; GFX6-NEXT: s_cmp_lt_u32 s7, s6
+; GFX6-NEXT: s_cselect_b32 s6, s7, s6
+; GFX6-NEXT: s_add_i32 s5, s5, s6
+; GFX6-NEXT: s_mov_b32 s6, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s6
+; GFX6-NEXT: s_and_b32 s0, s0, s6
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s6
+; GFX6-NEXT: s_and_b32 s2, s3, s6
+; GFX6-NEXT: s_lshr_b32 s5, s5, 16
+; GFX6-NEXT: s_and_b32 s3, s5, s6
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s6
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s12, 0xffff
+; GFX8-NEXT: s_xor_b32 s13, s0, s12
+; GFX8-NEXT: s_lshr_b32 s9, s3, 16
+; GFX8-NEXT: s_lshr_b32 s6, s0, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 16
+; GFX8-NEXT: s_lshr_b32 s8, s2, 16
+; GFX8-NEXT: s_lshr_b32 s10, s4, 16
+; GFX8-NEXT: s_lshr_b32 s11, s5, 16
+; GFX8-NEXT: s_bfe_u32 s13, s13, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s13, s3
+; GFX8-NEXT: s_cselect_b32 s3, s13, s3
+; GFX8-NEXT: s_add_i32 s0, s0, s3
+; GFX8-NEXT: s_xor_b32 s3, s6, s12
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s9, s9, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s9
+; GFX8-NEXT: s_cselect_b32 s3, s3, s9
+; GFX8-NEXT: s_add_i32 s6, s6, s3
+; GFX8-NEXT: s_xor_b32 s3, s1, s12
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s1, s1, s3
+; GFX8-NEXT: s_xor_b32 s3, s7, s12
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s10, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s7, s7, s3
+; GFX8-NEXT: s_xor_b32 s3, s2, s12
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s5, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s2, s2, s3
+; GFX8-NEXT: s_xor_b32 s3, s8, s12
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s11, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_add_i32 s8, s8, s3
+; GFX8-NEXT: s_bfe_u32 s3, s6, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s3
+; GFX8-NEXT: s_bfe_u32 s3, s7, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s3
+; GFX8-NEXT: s_bfe_u32 s3, s8, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX9-NEXT: s_xor_b32 s7, s0, s6
+; GFX9-NEXT: s_mov_b32 s9, 0xffff
+; GFX9-NEXT: s_lshr_b32 s8, s7, 16
+; GFX9-NEXT: s_lshr_b32 s10, s3, 16
+; GFX9-NEXT: s_and_b32 s7, s7, s9
+; GFX9-NEXT: s_and_b32 s3, s3, s9
+; GFX9-NEXT: s_cmp_lt_u32 s7, s3
+; GFX9-NEXT: s_cselect_b32 s3, s7, s3
+; GFX9-NEXT: s_cmp_lt_u32 s8, s10
+; GFX9-NEXT: s_cselect_b32 s7, s8, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s7
+; GFX9-NEXT: s_lshr_b32 s7, s0, 16
+; GFX9-NEXT: s_lshr_b32 s8, s3, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s3
+; GFX9-NEXT: s_add_i32 s7, s7, s8
+; GFX9-NEXT: s_xor_b32 s3, s1, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s7
+; GFX9-NEXT: s_lshr_b32 s7, s3, 16
+; GFX9-NEXT: s_lshr_b32 s8, s4, 16
+; GFX9-NEXT: s_and_b32 s3, s3, s9
+; GFX9-NEXT: s_and_b32 s4, s4, s9
+; GFX9-NEXT: s_cmp_lt_u32 s3, s4
+; GFX9-NEXT: s_cselect_b32 s3, s3, s4
+; GFX9-NEXT: s_cmp_lt_u32 s7, s8
+; GFX9-NEXT: s_cselect_b32 s4, s7, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_lshr_b32 s4, s1, 16
+; GFX9-NEXT: s_lshr_b32 s7, s3, 16
+; GFX9-NEXT: s_add_i32 s1, s1, s3
+; GFX9-NEXT: s_add_i32 s4, s4, s7
+; GFX9-NEXT: s_xor_b32 s3, s2, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX9-NEXT: s_lshr_b32 s4, s3, 16
+; GFX9-NEXT: s_lshr_b32 s6, s5, 16
+; GFX9-NEXT: s_and_b32 s3, s3, s9
+; GFX9-NEXT: s_and_b32 s5, s5, s9
+; GFX9-NEXT: s_cmp_lt_u32 s3, s5
+; GFX9-NEXT: s_cselect_b32 s3, s3, s5
+; GFX9-NEXT: s_cmp_lt_u32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_add_i32 s2, s2, s3
+; GFX9-NEXT: s_add_i32 s4, s4, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, -1, -1
+; GFX10-NEXT: s_mov_b32 s8, 0xffff
+; GFX10-NEXT: s_xor_b32 s7, s0, s6
+; GFX10-NEXT: s_and_b32 s10, s3, s8
+; GFX10-NEXT: s_lshr_b32 s9, s7, 16
+; GFX10-NEXT: s_and_b32 s7, s7, s8
+; GFX10-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-NEXT: s_cmp_lt_u32 s7, s10
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s7, s7, s10
+; GFX10-NEXT: s_cmp_lt_u32 s9, s3
+; GFX10-NEXT: s_cselect_b32 s3, s9, s3
+; GFX10-NEXT: s_and_b32 s10, s4, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s7, s3
+; GFX10-NEXT: s_lshr_b32 s7, s0, 16
+; GFX10-NEXT: s_lshr_b32 s9, s3, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s3
+; GFX10-NEXT: s_xor_b32 s3, s1, s6
+; GFX10-NEXT: s_add_i32 s7, s7, s9
+; GFX10-NEXT: s_lshr_b32 s9, s3, 16
+; GFX10-NEXT: s_and_b32 s3, s3, s8
+; GFX10-NEXT: s_lshr_b32 s4, s4, 16
+; GFX10-NEXT: s_cmp_lt_u32 s3, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s7
+; GFX10-NEXT: s_cselect_b32 s3, s3, s10
+; GFX10-NEXT: s_cmp_lt_u32 s9, s4
+; GFX10-NEXT: s_cselect_b32 s4, s9, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX10-NEXT: s_lshr_b32 s4, s1, 16
+; GFX10-NEXT: s_lshr_b32 s9, s3, 16
+; GFX10-NEXT: s_add_i32 s1, s1, s3
+; GFX10-NEXT: s_xor_b32 s3, s2, s6
+; GFX10-NEXT: s_add_i32 s4, s4, s9
+; GFX10-NEXT: s_lshr_b32 s6, s3, 16
+; GFX10-NEXT: s_and_b32 s3, s3, s8
+; GFX10-NEXT: s_and_b32 s8, s5, s8
+; GFX10-NEXT: s_lshr_b32 s5, s5, 16
+; GFX10-NEXT: s_cmp_lt_u32 s3, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX10-NEXT: s_cselect_b32 s3, s3, s8
+; GFX10-NEXT: s_cmp_lt_u32 s6, s5
+; GFX10-NEXT: s_cselect_b32 s5, s6, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s5
+; GFX10-NEXT: s_lshr_b32 s5, s2, 16
+; GFX10-NEXT: s_lshr_b32 s6, s3, 16
+; GFX10-NEXT: s_add_i32 s2, s2, s3
+; GFX10-NEXT: s_add_i32 s5, s5, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s5
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <6 x i16> @llvm.uadd.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x i32>
+ ret <3 x i32> %cast
+}
+
+define <4 x float> @v_uaddsat_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; GFX6-LABEL: v_uaddsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX6-NEXT: v_xor_b32_e32 v16, -1, v0
+; GFX6-NEXT: v_min_u32_e32 v8, v16, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX6-NEXT: v_xor_b32_e32 v9, -1, v1
+; GFX6-NEXT: v_min_u32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_add_i32_e32 v1, vcc, v1, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v10
+; GFX6-NEXT: v_xor_b32_e32 v9, -1, v2
+; GFX6-NEXT: v_min_u32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v11
+; GFX6-NEXT: v_xor_b32_e32 v9, -1, v3
+; GFX6-NEXT: v_min_u32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_add_i32_e32 v3, vcc, v3, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v12
+; GFX6-NEXT: v_xor_b32_e32 v9, -1, v4
+; GFX6-NEXT: v_min_u32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v13
+; GFX6-NEXT: v_xor_b32_e32 v9, -1, v5
+; GFX6-NEXT: v_min_u32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_add_i32_e32 v5, vcc, v5, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v14
+; GFX6-NEXT: v_xor_b32_e32 v9, -1, v6
+; GFX6-NEXT: v_min_u32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_add_i32_e32 v6, vcc, v6, v8
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v15
+; GFX6-NEXT: v_xor_b32_e32 v9, -1, v7
+; GFX6-NEXT: v_min_u32_e32 v8, v9, v8
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_add_i32_e32 v7, vcc, v7, v8
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_and_b32_e32 v4, s4, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: s_mov_b32 s4, 0xffff
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX8-NEXT: v_xor_b32_e32 v12, s4, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX8-NEXT: v_xor_b32_e32 v13, s4, v8
+; GFX8-NEXT: v_min_u16_e32 v12, v12, v4
+; GFX8-NEXT: v_min_u16_sdwa v4, v13, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v13, s4, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX8-NEXT: v_xor_b32_e32 v14, s4, v9
+; GFX8-NEXT: v_min_u16_e32 v13, v13, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX8-NEXT: v_min_u16_sdwa v5, v14, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v14, s4, v2
+; GFX8-NEXT: v_xor_b32_e32 v15, s4, v10
+; GFX8-NEXT: v_add_u16_e32 v0, v0, v12
+; GFX8-NEXT: v_add_u16_sdwa v4, v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_e32 v14, v14, v6
+; GFX8-NEXT: v_min_u16_sdwa v6, v15, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_xor_b32_e32 v15, s4, v3
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_xor_b32_e32 v16, s4, v11
+; GFX8-NEXT: v_add_u16_e32 v1, v1, v13
+; GFX8-NEXT: v_add_u16_sdwa v4, v9, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_e32 v15, v15, v7
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
+; GFX8-NEXT: v_min_u16_sdwa v7, v16, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_add_u16_e32 v2, v2, v14
+; GFX8-NEXT: v_add_u16_sdwa v4, v10, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX8-NEXT: v_add_u16_e32 v3, v3, v15
+; GFX8-NEXT: v_add_u16_sdwa v4, v11, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX9-NEXT: v_xor_b32_e32 v8, s4, v0
+; GFX9-NEXT: v_pk_min_u16 v4, v8, v4
+; GFX9-NEXT: v_pk_add_u16 v0, v0, v4
+; GFX9-NEXT: v_xor_b32_e32 v4, s4, v1
+; GFX9-NEXT: v_pk_min_u16 v4, v4, v5
+; GFX9-NEXT: v_pk_add_u16 v1, v1, v4
+; GFX9-NEXT: v_xor_b32_e32 v4, s4, v2
+; GFX9-NEXT: v_pk_min_u16 v4, v4, v6
+; GFX9-NEXT: v_pk_add_u16 v2, v2, v4
+; GFX9-NEXT: v_xor_b32_e32 v4, s4, v3
+; GFX9-NEXT: v_pk_min_u16 v4, v4, v7
+; GFX9-NEXT: v_pk_add_u16 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, -1, -1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_xor_b32_e32 v15, s4, v0
+; GFX10-NEXT: v_xor_b32_e32 v19, s4, v1
+; GFX10-NEXT: v_xor_b32_e32 v23, s4, v2
+; GFX10-NEXT: v_xor_b32_e32 v10, s4, v3
+; GFX10-NEXT: v_pk_min_u16 v11, v15, v4
+; GFX10-NEXT: v_pk_min_u16 v15, v19, v5
+; GFX10-NEXT: v_pk_min_u16 v19, v23, v6
+; GFX10-NEXT: v_pk_min_u16 v6, v10, v7
+; GFX10-NEXT: v_pk_add_u16 v0, v0, v11
+; GFX10-NEXT: v_pk_add_u16 v1, v1, v15
+; GFX10-NEXT: v_pk_add_u16 v2, v2, v19
+; GFX10-NEXT: v_pk_add_u16 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x i32> @s_uaddsat_v8i16(<8 x i16> inreg %lhs, <8 x i16> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s8, s8, 16
+; GFX6-NEXT: s_not_b32 s16, s0
+; GFX6-NEXT: s_cmp_lt_u32 s16, s8
+; GFX6-NEXT: s_cselect_b32 s8, s16, s8
+; GFX6-NEXT: s_add_i32 s0, s0, s8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s8, s9, 16
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_not_b32 s9, s1
+; GFX6-NEXT: s_cmp_lt_u32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_add_i32 s1, s1, s8
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s8, s10, 16
+; GFX6-NEXT: s_not_b32 s9, s2
+; GFX6-NEXT: s_cmp_lt_u32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_add_i32 s2, s2, s8
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s8, s11, 16
+; GFX6-NEXT: s_not_b32 s9, s3
+; GFX6-NEXT: s_cmp_lt_u32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_add_i32 s3, s3, s8
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshr_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s8, s12, 16
+; GFX6-NEXT: s_not_b32 s9, s4
+; GFX6-NEXT: s_cmp_lt_u32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_add_i32 s4, s4, s8
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshr_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s8, s13, 16
+; GFX6-NEXT: s_not_b32 s9, s5
+; GFX6-NEXT: s_cmp_lt_u32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_add_i32 s5, s5, s8
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_lshr_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s8, s14, 16
+; GFX6-NEXT: s_not_b32 s9, s6
+; GFX6-NEXT: s_cmp_lt_u32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_add_i32 s6, s6, s8
+; GFX6-NEXT: s_lshl_b32 s7, s7, 16
+; GFX6-NEXT: s_lshr_b32 s6, s6, 16
+; GFX6-NEXT: s_lshl_b32 s8, s15, 16
+; GFX6-NEXT: s_not_b32 s9, s7
+; GFX6-NEXT: s_cmp_lt_u32 s9, s8
+; GFX6-NEXT: s_cselect_b32 s8, s9, s8
+; GFX6-NEXT: s_add_i32 s7, s7, s8
+; GFX6-NEXT: s_mov_b32 s8, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s8
+; GFX6-NEXT: s_and_b32 s0, s0, s8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s8
+; GFX6-NEXT: s_and_b32 s2, s3, s8
+; GFX6-NEXT: s_and_b32 s3, s5, s8
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshr_b32 s7, s7, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s8
+; GFX6-NEXT: s_and_b32 s4, s7, s8
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: s_and_b32 s3, s6, s8
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_or_b32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_mov_b32 s16, 0xffff
+; GFX8-NEXT: s_xor_b32 s17, s0, s16
+; GFX8-NEXT: s_lshr_b32 s12, s4, 16
+; GFX8-NEXT: s_lshr_b32 s8, s0, 16
+; GFX8-NEXT: s_lshr_b32 s9, s1, 16
+; GFX8-NEXT: s_lshr_b32 s10, s2, 16
+; GFX8-NEXT: s_lshr_b32 s11, s3, 16
+; GFX8-NEXT: s_lshr_b32 s13, s5, 16
+; GFX8-NEXT: s_lshr_b32 s14, s6, 16
+; GFX8-NEXT: s_lshr_b32 s15, s7, 16
+; GFX8-NEXT: s_bfe_u32 s17, s17, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s17, s4
+; GFX8-NEXT: s_cselect_b32 s4, s17, s4
+; GFX8-NEXT: s_add_i32 s0, s0, s4
+; GFX8-NEXT: s_xor_b32 s4, s8, s16
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s12, s12, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s12
+; GFX8-NEXT: s_cselect_b32 s4, s4, s12
+; GFX8-NEXT: s_add_i32 s8, s8, s4
+; GFX8-NEXT: s_xor_b32 s4, s1, s16
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s1, s1, s4
+; GFX8-NEXT: s_xor_b32 s4, s9, s16
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s13, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s9, s9, s4
+; GFX8-NEXT: s_xor_b32 s4, s2, s16
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s6, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s2, s2, s4
+; GFX8-NEXT: s_xor_b32 s4, s10, s16
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s14, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s10, s10, s4
+; GFX8-NEXT: s_xor_b32 s4, s3, s16
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s7, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s3, s3, s4
+; GFX8-NEXT: s_xor_b32 s4, s11, s16
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s15, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s5
+; GFX8-NEXT: s_cselect_b32 s4, s4, s5
+; GFX8-NEXT: s_add_i32 s11, s11, s4
+; GFX8-NEXT: s_bfe_u32 s4, s8, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s4, s9, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s4
+; GFX8-NEXT: s_bfe_u32 s4, s10, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s4
+; GFX8-NEXT: s_bfe_u32 s4, s11, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, -1, -1
+; GFX9-NEXT: s_xor_b32 s9, s0, s8
+; GFX9-NEXT: s_mov_b32 s11, 0xffff
+; GFX9-NEXT: s_lshr_b32 s10, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s4, 16
+; GFX9-NEXT: s_and_b32 s9, s9, s11
+; GFX9-NEXT: s_and_b32 s4, s4, s11
+; GFX9-NEXT: s_cmp_lt_u32 s9, s4
+; GFX9-NEXT: s_cselect_b32 s4, s9, s4
+; GFX9-NEXT: s_cmp_lt_u32 s10, s12
+; GFX9-NEXT: s_cselect_b32 s9, s10, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s9
+; GFX9-NEXT: s_lshr_b32 s9, s0, 16
+; GFX9-NEXT: s_lshr_b32 s10, s4, 16
+; GFX9-NEXT: s_add_i32 s0, s0, s4
+; GFX9-NEXT: s_add_i32 s9, s9, s10
+; GFX9-NEXT: s_xor_b32 s4, s1, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s9
+; GFX9-NEXT: s_lshr_b32 s9, s4, 16
+; GFX9-NEXT: s_lshr_b32 s10, s5, 16
+; GFX9-NEXT: s_and_b32 s4, s4, s11
+; GFX9-NEXT: s_and_b32 s5, s5, s11
+; GFX9-NEXT: s_cmp_lt_u32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_lt_u32 s9, s10
+; GFX9-NEXT: s_cselect_b32 s5, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s1, 16
+; GFX9-NEXT: s_lshr_b32 s9, s4, 16
+; GFX9-NEXT: s_add_i32 s1, s1, s4
+; GFX9-NEXT: s_add_i32 s5, s5, s9
+; GFX9-NEXT: s_xor_b32 s4, s2, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s5
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_lshr_b32 s9, s6, 16
+; GFX9-NEXT: s_and_b32 s4, s4, s11
+; GFX9-NEXT: s_and_b32 s6, s6, s11
+; GFX9-NEXT: s_cmp_lt_u32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s4, s4, s6
+; GFX9-NEXT: s_cmp_lt_u32 s5, s9
+; GFX9-NEXT: s_cselect_b32 s5, s5, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s2, 16
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_add_i32 s2, s2, s4
+; GFX9-NEXT: s_add_i32 s5, s5, s6
+; GFX9-NEXT: s_xor_b32 s4, s3, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s5
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_lshr_b32 s6, s7, 16
+; GFX9-NEXT: s_and_b32 s4, s4, s11
+; GFX9-NEXT: s_and_b32 s7, s7, s11
+; GFX9-NEXT: s_cmp_lt_u32 s4, s7
+; GFX9-NEXT: s_cselect_b32 s4, s4, s7
+; GFX9-NEXT: s_cmp_lt_u32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s3, 16
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_add_i32 s3, s3, s4
+; GFX9-NEXT: s_add_i32 s5, s5, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s5
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_pack_ll_b32_b16 s8, -1, -1
+; GFX10-NEXT: s_mov_b32 s10, 0xffff
+; GFX10-NEXT: s_xor_b32 s9, s0, s8
+; GFX10-NEXT: s_and_b32 s12, s4, s10
+; GFX10-NEXT: s_lshr_b32 s11, s9, 16
+; GFX10-NEXT: s_and_b32 s9, s9, s10
+; GFX10-NEXT: s_lshr_b32 s4, s4, 16
+; GFX10-NEXT: s_cmp_lt_u32 s9, s12
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s9, s9, s12
+; GFX10-NEXT: s_cmp_lt_u32 s11, s4
+; GFX10-NEXT: s_cselect_b32 s4, s11, s4
+; GFX10-NEXT: s_and_b32 s12, s5, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s9, s4
+; GFX10-NEXT: s_lshr_b32 s9, s0, 16
+; GFX10-NEXT: s_lshr_b32 s11, s4, 16
+; GFX10-NEXT: s_add_i32 s0, s0, s4
+; GFX10-NEXT: s_xor_b32 s4, s1, s8
+; GFX10-NEXT: s_add_i32 s9, s9, s11
+; GFX10-NEXT: s_lshr_b32 s11, s4, 16
+; GFX10-NEXT: s_and_b32 s4, s4, s10
+; GFX10-NEXT: s_lshr_b32 s5, s5, 16
+; GFX10-NEXT: s_cmp_lt_u32 s4, s12
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s9
+; GFX10-NEXT: s_cselect_b32 s4, s4, s12
+; GFX10-NEXT: s_cmp_lt_u32 s11, s5
+; GFX10-NEXT: s_cselect_b32 s5, s11, s5
+; GFX10-NEXT: s_and_b32 s12, s6, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX10-NEXT: s_lshr_b32 s5, s1, 16
+; GFX10-NEXT: s_lshr_b32 s11, s4, 16
+; GFX10-NEXT: s_add_i32 s1, s1, s4
+; GFX10-NEXT: s_xor_b32 s4, s2, s8
+; GFX10-NEXT: s_add_i32 s5, s5, s11
+; GFX10-NEXT: s_lshr_b32 s11, s4, 16
+; GFX10-NEXT: s_and_b32 s4, s4, s10
+; GFX10-NEXT: s_lshr_b32 s6, s6, 16
+; GFX10-NEXT: s_cmp_lt_u32 s4, s12
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5
+; GFX10-NEXT: s_cselect_b32 s4, s4, s12
+; GFX10-NEXT: s_cmp_lt_u32 s11, s6
+; GFX10-NEXT: s_cselect_b32 s6, s11, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s6
+; GFX10-NEXT: s_lshr_b32 s6, s2, 16
+; GFX10-NEXT: s_lshr_b32 s11, s4, 16
+; GFX10-NEXT: s_add_i32 s2, s2, s4
+; GFX10-NEXT: s_xor_b32 s4, s3, s8
+; GFX10-NEXT: s_add_i32 s6, s6, s11
+; GFX10-NEXT: s_lshr_b32 s8, s4, 16
+; GFX10-NEXT: s_and_b32 s4, s4, s10
+; GFX10-NEXT: s_and_b32 s10, s7, s10
+; GFX10-NEXT: s_lshr_b32 s7, s7, 16
+; GFX10-NEXT: s_cmp_lt_u32 s4, s10
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s4, s4, s10
+; GFX10-NEXT: s_cmp_lt_u32 s8, s7
+; GFX10-NEXT: s_cselect_b32 s7, s8, s7
+; GFX10-NEXT: s_lshr_b32 s5, s3, 16
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s4, s7
+; GFX10-NEXT: s_lshr_b32 s7, s4, 16
+; GFX10-NEXT: s_add_i32 s3, s3, s4
+; GFX10-NEXT: s_add_i32 s5, s5, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s5
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x i32>
+ ret <4 x i32> %cast
+}
+
+; FIXME: i48 is broken because i48 add is broken
+; define i48 @v_uaddsat_i48(i48 %lhs, i48 %rhs) {
+; %result = call i48 @llvm.uadd.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps i48 @s_uaddsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.uadd.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps <2 x float> @uaddsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
+; %result = call i48 @llvm.uadd.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+; define amdgpu_ps <2 x float> @uaddsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.uadd.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+define i64 @v_uaddsat_i64(i64 %lhs, i64 %rhs) {
+; GFX6-LABEL: v_uaddsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v2
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_add_co_u32_e64 v0, vcc_lo, v0, v2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
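+
+; The 64-bit and wider cases use the other lowering strategy from this patch:
+; add-with-overflow plus a select of the saturated value, roughly
+; uadd.sat(a, b) == select(carry, -1, a + b). A rough IR equivalent
+; (illustrative sketch only; %a and %b are placeholder names):
+;   %sum = add i64 %a, %b
+;   %ov  = icmp ult i64 %sum, %b           ; unsigned wrap implies saturation
+;   %sat = select i1 %ov, i64 -1, i64 %sum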
+
+define amdgpu_ps i64 @s_uaddsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s0, s0, s2
+; GFX6-NEXT: s_cselect_b32 s4, 1, 0
+; GFX6-NEXT: s_and_b32 s4, s4, 1
+; GFX6-NEXT: s_cmp_lg_u32 s4, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_addc_u32 s1, s1, s3
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s0, s0, s2
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: s_and_b32 s4, s4, 1
+; GFX8-NEXT: s_cmp_lg_u32 s4, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: s_addc_u32 s1, s1, s3
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s0, s0, s2
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: s_and_b32 s4, s4, 1
+; GFX9-NEXT: s_cmp_lg_u32 s4, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: s_addc_u32 s1, s1, s3
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s0, s0, s2
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s3
+; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[0:1], s[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s0, -1, s2
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s1, -1, s2
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
+
+define amdgpu_ps <2 x float> @uaddsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
+; GFX6-LABEL: uaddsat_i64_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i64_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i64_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i64_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, -1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @uaddsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: uaddsat_i64_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i64_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v2, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i64_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v2, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i64_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_add_co_u32_e64 v0, vcc_lo, v0, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[0:1], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.uadd.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define <2 x i64> @v_uaddsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; GFX6-LABEL: v_uaddsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX6-NEXT: v_add_i32_e32 v2, vcc, v2, v6
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v5, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX8-NEXT: v_add_u32_e32 v2, vcc, v2, v6
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v7, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, v2, v6
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v7, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v10, v4
+; GFX10-NEXT: v_mov_b32_e32 v11, v5
+; GFX10-NEXT: v_mov_b32_e32 v8, v6
+; GFX10-NEXT: v_mov_b32_e32 v9, v7
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_u32_e64 v0, vcc_lo, v0, v10
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v11, vcc_lo
+; GFX10-NEXT: v_add_co_u32_e64 v2, vcc_lo, v2, v8
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v9, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[10:11]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[2:3], v[8:9]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, -1, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, -1, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
+
+define amdgpu_ps <2 x i64> @s_uaddsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s0, s0, s4
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: s_addc_u32 s1, s1, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v2, s0
+; GFX6-NEXT: s_add_u32 s0, s2, s6
+; GFX6-NEXT: v_mov_b32_e32 v3, s1
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s6
+; GFX6-NEXT: s_addc_u32 s1, s3, s7
+; GFX6-NEXT: v_mov_b32_e32 v1, s7
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v4, s0
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v4, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -1, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v2
+; GFX6-NEXT: v_readfirstlane_b32 s1, v3
+; GFX6-NEXT: v_readfirstlane_b32 s2, v0
+; GFX6-NEXT: v_readfirstlane_b32 s3, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, s0
+; GFX8-NEXT: s_add_u32 s0, s2, s6
+; GFX8-NEXT: v_mov_b32_e32 v3, s1
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: s_addc_u32 s1, s3, s7
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -1, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v2
+; GFX8-NEXT: v_readfirstlane_b32 s1, v3
+; GFX8-NEXT: v_readfirstlane_b32 s2, v0
+; GFX8-NEXT: v_readfirstlane_b32 s3, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s0, s0, s4
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: s_addc_u32 s1, s1, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, s0
+; GFX9-NEXT: s_add_u32 s0, s2, s6
+; GFX9-NEXT: v_mov_b32_e32 v3, s1
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: s_addc_u32 s1, s3, s7
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, s0
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -1, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v2
+; GFX9-NEXT: v_readfirstlane_b32 s1, v3
+; GFX9-NEXT: v_readfirstlane_b32 s2, v0
+; GFX9-NEXT: v_readfirstlane_b32 s3, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s0, s0, s4
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s5
+; GFX10-NEXT: s_add_u32 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, s[0:1], s[4:5]
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s3, s3, s7
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s0, -1, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s5, s[2:3], s[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s1, -1, s4
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s2, -1, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s3, -1, s5
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
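+
+; The i128 case below chains s_addc_u32 for the wide add and assembles the
+; 128-bit "sum < rhs" overflow test from 64-bit pieces, roughly (illustrative
+; sketch only; names are placeholders):
+;   %lt.hi = icmp ult i64 %sum.hi, %rhs.hi
+;   %eq.hi = icmp eq i64 %sum.hi, %rhs.hi
+;   %lt.lo = icmp ult i64 %sum.lo, %rhs.lo
+;   %ov    = select i1 %eq.hi, i1 %lt.lo, i1 %lt.hi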
+
+define amdgpu_ps i128 @s_uaddsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s0, s0, s4
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s1, s1, s5
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: s_addc_u32 s2, s2, s6
+; GFX6-NEXT: s_cselect_b32 s8, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v2, s4
+; GFX6-NEXT: s_and_b32 s8, s8, 1
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: s_cmp_lg_u32 s8, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s6
+; GFX6-NEXT: s_addc_u32 s3, s3, s7
+; GFX6-NEXT: v_mov_b32_e32 v1, s7
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s2
+; GFX6-NEXT: v_mov_b32_e32 v3, s3
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: v_readfirstlane_b32 s2, v2
+; GFX6-NEXT: v_readfirstlane_b32 s3, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s0, s0, s4
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s1, s1, s5
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: s_addc_u32 s2, s2, s6
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: s_and_b32 s8, s8, 1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: s_cmp_lg_u32 s8, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: s_addc_u32 s3, s3, s7
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: s_and_b32 s4, 1, s6
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s2
+; GFX8-NEXT: v_mov_b32_e32 v3, s3
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: v_readfirstlane_b32 s2, v2
+; GFX8-NEXT: v_readfirstlane_b32 s3, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s0, s0, s4
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s1, s1, s5
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: s_addc_u32 s2, s2, s6
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: s_and_b32 s8, s8, 1
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: s_cmp_lg_u32 s8, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: s_addc_u32 s3, s3, s7
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: s_and_b32 s4, 1, s6
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, -1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: v_readfirstlane_b32 s2, v2
+; GFX9-NEXT: v_readfirstlane_b32 s3, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s0, s0, s4
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s5
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, s[0:1], s[4:5]
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s4
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: s_addc_u32 s3, s3, s7
+; GFX10-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s5, s[2:3], s[6:7]
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: s_and_b32 s4, 1, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s5
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s1, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s2, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s3, -1, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.uadd.sat.i128(i128 %lhs, i128 %rhs)
+ ret i128 %result
+}
+
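
A minimal C++ sketch of the saturation logic the i128 checks above encode, assuming Clang's unsigned __int128 extension; this only illustrates the lowered sequence, it is not the LegalizerHelper code itself:

  // Wrapping add, then saturate: the unsigned carry-out condition holds
  // exactly when the truncated sum is less than either operand, which is
  // what the v_cmp_lt_u64 / v_cmp_eq_u64 pair above tests, split across
  // the two 64-bit halves with an equality tiebreak on the high half.
  static unsigned __int128 uaddsat128(unsigned __int128 a,
                                      unsigned __int128 b) {
    unsigned __int128 sum = a + b;                // wraps modulo 2^128
    return sum < b ? ~(unsigned __int128)0 : sum; // saturate to all-ones
  }
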
+define amdgpu_ps <4 x float> @uaddsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
+; GFX6-LABEL: uaddsat_i128_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v6, s2
+; GFX6-NEXT: v_mov_b32_e32 v7, s3
+; GFX6-NEXT: v_addc_u32_e32 v6, vcc, v6, v2, vcc
+; GFX6-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v4, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v6, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v7, -1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i128_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v6, s2
+; GFX8-NEXT: v_mov_b32_e32 v7, s3
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v6, v2, vcc
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, -1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i128_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, s2
+; GFX9-NEXT: v_mov_b32_e32 v7, s3
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v6, v2, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, -1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i128_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_add_co_u32_e64 v10, vcc_lo, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v11, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v4, vcc_lo, s2, v2, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, s3, v3, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[10:11], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[4:5], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[4:5], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v10, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v11, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v5, -1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.uadd.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x float> @uaddsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: uaddsat_i128_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, s1
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc
+; GFX6-NEXT: v_mov_b32_e32 v4, s2
+; GFX6-NEXT: v_mov_b32_e32 v5, s3
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX6-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: uaddsat_i128_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, s1
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v4, vcc
+; GFX8-NEXT: v_mov_b32_e32 v4, s2
+; GFX8-NEXT: v_mov_b32_e32 v5, s3
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v2, v4, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX8-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: uaddsat_i128_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v4, s1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v0
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v4, vcc
+; GFX9-NEXT: v_mov_b32_e32 v4, s2
+; GFX9-NEXT: v_mov_b32_e32 v5, s3
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v4, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v5, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: uaddsat_i128_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_add_co_u32_e64 v0, vcc_lo, v0, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, s2, v2, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[0:1], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[2:3], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc_lo
+; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.uadd.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define <2 x i128> @v_uaddsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
+; GFX6-LABEL: v_uaddsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_add_i32_e32 v0, vcc, v0, v8
+; GFX6-NEXT: v_addc_u32_e32 v1, vcc, v1, v9, vcc
+; GFX6-NEXT: v_addc_u32_e32 v2, vcc, v2, v10, vcc
+; GFX6-NEXT: v_addc_u32_e32 v3, vcc, v3, v11, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX6-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[10:11]
+; GFX6-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX6-NEXT: v_add_i32_e32 v4, vcc, v4, v12
+; GFX6-NEXT: v_addc_u32_e32 v5, vcc, v5, v13, vcc
+; GFX6-NEXT: v_addc_u32_e32 v6, vcc, v6, v14, vcc
+; GFX6-NEXT: v_addc_u32_e32 v7, vcc, v7, v15, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX6-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX6-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[14:15]
+; GFX6-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX6-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v4, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v5, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v6, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v7, -1, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_uaddsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_add_u32_e32 v0, vcc, v0, v8
+; GFX8-NEXT: v_addc_u32_e32 v1, vcc, v1, v9, vcc
+; GFX8-NEXT: v_addc_u32_e32 v2, vcc, v2, v10, vcc
+; GFX8-NEXT: v_addc_u32_e32 v3, vcc, v3, v11, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX8-NEXT: v_add_u32_e32 v4, vcc, v4, v12
+; GFX8-NEXT: v_addc_u32_e32 v5, vcc, v5, v13, vcc
+; GFX8-NEXT: v_addc_u32_e32 v6, vcc, v6, v14, vcc
+; GFX8-NEXT: v_addc_u32_e32 v7, vcc, v7, v15, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX8-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v4, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v5, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v6, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v7, -1, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_uaddsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, v0, v8
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v9, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v2, vcc, v2, v10, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v11, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, v4, v12
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v5, v13, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v6, vcc, v6, v14, vcc
+; GFX9-NEXT: v_addc_co_u32_e32 v7, vcc, v7, v15, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc
+; GFX9-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v4, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v5, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v6, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v7, -1, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_uaddsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v18, v8
+; GFX10-NEXT: v_mov_b32_e32 v19, v9
+; GFX10-NEXT: v_mov_b32_e32 v16, v10
+; GFX10-NEXT: v_mov_b32_e32 v17, v11
+; GFX10-NEXT: v_mov_b32_e32 v10, v12
+; GFX10-NEXT: v_add_co_u32_e64 v0, vcc_lo, v0, v18
+; GFX10-NEXT: v_mov_b32_e32 v11, v13
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, v1, v19, vcc_lo
+; GFX10-NEXT: v_mov_b32_e32 v20, v14
+; GFX10-NEXT: v_add_co_ci_u32_e32 v2, vcc_lo, v2, v16, vcc_lo
+; GFX10-NEXT: v_mov_b32_e32 v21, v15
+; GFX10-NEXT: v_add_co_ci_u32_e32 v3, vcc_lo, v3, v17, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[18:19]
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cndmask_b32_e64 v8, 0, 1, vcc_lo
+; GFX10-NEXT: v_add_co_u32_e64 v4, vcc_lo, v4, v10
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, v5, v11, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v6, vcc_lo, v6, v20, vcc_lo
+; GFX10-NEXT: v_add_co_ci_u32_e32 v7, vcc_lo, v7, v21, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[2:3], v[16:17]
+; GFX10-NEXT: v_cndmask_b32_e64 v9, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[4:5], v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e64 v12, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[6:7], v[20:21]
+; GFX10-NEXT: v_cndmask_b32_e64 v13, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[16:17]
+; GFX10-NEXT: v_cndmask_b32_e32 v8, v9, v8, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[6:7], v[20:21]
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX10-NEXT: v_cndmask_b32_e32 v9, v13, v12, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v8
+; GFX10-NEXT: v_and_b32_e32 v9, 1, v9
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, -1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v9
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, -1, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, -1, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, -1, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, -1, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
+
+define amdgpu_ps <2 x i128> @s_uaddsat_v2i128(<2 x i128> inreg %lhs, <2 x i128> inreg %rhs) {
+; GFX6-LABEL: s_uaddsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_add_u32 s0, s0, s8
+; GFX6-NEXT: s_cselect_b32 s16, 1, 0
+; GFX6-NEXT: s_and_b32 s16, s16, 1
+; GFX6-NEXT: s_cmp_lg_u32 s16, 0
+; GFX6-NEXT: s_addc_u32 s1, s1, s9
+; GFX6-NEXT: s_cselect_b32 s16, 1, 0
+; GFX6-NEXT: s_and_b32 s16, s16, 1
+; GFX6-NEXT: s_cmp_lg_u32 s16, 0
+; GFX6-NEXT: s_addc_u32 s2, s2, s10
+; GFX6-NEXT: s_cselect_b32 s16, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v2, s8
+; GFX6-NEXT: s_and_b32 s16, s16, 1
+; GFX6-NEXT: v_mov_b32_e32 v3, s9
+; GFX6-NEXT: s_cmp_lg_u32 s16, 0
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s10
+; GFX6-NEXT: s_addc_u32 s3, s3, s11
+; GFX6-NEXT: v_mov_b32_e32 v1, s11
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: s_add_u32 s0, s4, s12
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_addc_u32 s1, s5, s13
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_cselect_b32 s2, 1, 0
+; GFX6-NEXT: s_and_b32 s2, s2, 1
+; GFX6-NEXT: s_cmp_lg_u32 s2, 0
+; GFX6-NEXT: s_addc_u32 s2, s6, s14
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v1, -1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v2, -1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s12
+; GFX6-NEXT: s_and_b32 s3, s3, 1
+; GFX6-NEXT: v_mov_b32_e32 v3, s13
+; GFX6-NEXT: s_cmp_lg_u32 s3, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v0, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v1, -1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s14
+; GFX6-NEXT: s_addc_u32 s3, s7, s15
+; GFX6-NEXT: v_mov_b32_e32 v1, s15
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s3
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, -1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s2
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v4
+; GFX6-NEXT: v_readfirstlane_b32 s1, v5
+; GFX6-NEXT: v_readfirstlane_b32 s2, v6
+; GFX6-NEXT: v_readfirstlane_b32 s3, v7
+; GFX6-NEXT: v_readfirstlane_b32 s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s5, v1
+; GFX6-NEXT: v_readfirstlane_b32 s6, v2
+; GFX6-NEXT: v_readfirstlane_b32 s7, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_uaddsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_add_u32 s0, s0, s8
+; GFX8-NEXT: s_cselect_b32 s16, 1, 0
+; GFX8-NEXT: s_and_b32 s16, s16, 1
+; GFX8-NEXT: s_cmp_lg_u32 s16, 0
+; GFX8-NEXT: s_addc_u32 s1, s1, s9
+; GFX8-NEXT: s_cselect_b32 s16, 1, 0
+; GFX8-NEXT: s_and_b32 s16, s16, 1
+; GFX8-NEXT: s_cmp_lg_u32 s16, 0
+; GFX8-NEXT: s_addc_u32 s2, s2, s10
+; GFX8-NEXT: s_cselect_b32 s16, 1, 0
+; GFX8-NEXT: s_and_b32 s16, s16, 1
+; GFX8-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NEXT: s_cmp_lg_u32 s16, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, s9
+; GFX8-NEXT: s_addc_u32 s3, s3, s11
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s10
+; GFX8-NEXT: v_mov_b32_e32 v1, s11
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], s[10:11]
+; GFX8-NEXT: s_cselect_b32 s10, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: s_and_b32 s8, 1, s10
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s8
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: s_add_u32 s0, s4, s12
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: s_addc_u32 s1, s5, s13
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_and_b32 s2, s2, 1
+; GFX8-NEXT: s_cmp_lg_u32 s2, 0
+; GFX8-NEXT: s_addc_u32 s2, s6, s14
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v1, -1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: s_and_b32 s3, s3, 1
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v2, -1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s12
+; GFX8-NEXT: s_cmp_lg_u32 s3, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, s13
+; GFX8-NEXT: s_addc_u32 s3, s7, s15
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v0, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v1, -1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s14
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], s[14:15]
+; GFX8-NEXT: v_mov_b32_e32 v1, s15
+; GFX8-NEXT: s_cselect_b32 s4, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: s_and_b32 s4, 1, s4
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s3
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, -1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s2
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v4
+; GFX8-NEXT: v_readfirstlane_b32 s1, v5
+; GFX8-NEXT: v_readfirstlane_b32 s2, v6
+; GFX8-NEXT: v_readfirstlane_b32 s3, v7
+; GFX8-NEXT: v_readfirstlane_b32 s4, v0
+; GFX8-NEXT: v_readfirstlane_b32 s5, v1
+; GFX8-NEXT: v_readfirstlane_b32 s6, v2
+; GFX8-NEXT: v_readfirstlane_b32 s7, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_uaddsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_add_u32 s0, s0, s8
+; GFX9-NEXT: s_cselect_b32 s16, 1, 0
+; GFX9-NEXT: s_and_b32 s16, s16, 1
+; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_addc_u32 s1, s1, s9
+; GFX9-NEXT: s_cselect_b32 s16, 1, 0
+; GFX9-NEXT: s_and_b32 s16, s16, 1
+; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_addc_u32 s2, s2, s10
+; GFX9-NEXT: s_cselect_b32 s16, 1, 0
+; GFX9-NEXT: s_and_b32 s16, s16, 1
+; GFX9-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-NEXT: s_addc_u32 s3, s3, s11
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s10
+; GFX9-NEXT: v_mov_b32_e32 v1, s11
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], s[10:11]
+; GFX9-NEXT: s_cselect_b32 s10, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: s_and_b32 s8, 1, s10
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s8
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: s_add_u32 s0, s4, s12
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: s_addc_u32 s1, s5, s13
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_and_b32 s2, s2, 1
+; GFX9-NEXT: s_cmp_lg_u32 s2, 0
+; GFX9-NEXT: s_addc_u32 s2, s6, s14
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v1, -1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: s_and_b32 s3, s3, 1
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v2, -1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s12
+; GFX9-NEXT: s_cmp_lg_u32 s3, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s13
+; GFX9-NEXT: s_addc_u32 s3, s7, s15
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v0, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v1, -1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s14
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], s[14:15]
+; GFX9-NEXT: v_mov_b32_e32 v1, s15
+; GFX9-NEXT: s_cselect_b32 s4, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: s_and_b32 s4, 1, s4
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v1, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, -1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, -1, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, -1, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v4
+; GFX9-NEXT: v_readfirstlane_b32 s1, v5
+; GFX9-NEXT: v_readfirstlane_b32 s2, v6
+; GFX9-NEXT: v_readfirstlane_b32 s3, v7
+; GFX9-NEXT: v_readfirstlane_b32 s4, v0
+; GFX9-NEXT: v_readfirstlane_b32 s5, v1
+; GFX9-NEXT: v_readfirstlane_b32 s6, v2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_uaddsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_add_u32 s0, s0, s8
+; GFX10-NEXT: s_cselect_b32 s16, 1, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_and_b32 s16, s16, 1
+; GFX10-NEXT: s_cmp_lg_u32 s16, 0
+; GFX10-NEXT: s_addc_u32 s1, s1, s9
+; GFX10-NEXT: s_cselect_b32 s16, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s8, s[0:1], s[8:9]
+; GFX10-NEXT: s_and_b32 s16, s16, 1
+; GFX10-NEXT: s_cmp_lg_u32 s16, 0
+; GFX10-NEXT: s_addc_u32 s2, s2, s10
+; GFX10-NEXT: s_cselect_b32 s16, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s8
+; GFX10-NEXT: s_and_b32 s16, s16, 1
+; GFX10-NEXT: s_cmp_lg_u32 s16, 0
+; GFX10-NEXT: s_addc_u32 s3, s3, s11
+; GFX10-NEXT: s_cmp_eq_u64 s[2:3], s[10:11]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s10, s[2:3], s[10:11]
+; GFX10-NEXT: s_cselect_b32 s16, 1, 0
+; GFX10-NEXT: s_and_b32 s8, 1, s16
+; GFX10-NEXT: s_add_u32 s4, s4, s12
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s8
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s10
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_addc_u32 s5, s5, s13
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s9, s[4:5], s[12:13]
+; GFX10-NEXT: s_addc_u32 s6, s6, s14
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: s_and_b32 s8, s8, 1
+; GFX10-NEXT: s_cmp_lg_u32 s8, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s9
+; GFX10-NEXT: s_addc_u32 s7, s7, s15
+; GFX10-NEXT: s_cmp_eq_u64 s[6:7], s[14:15]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s9, s[6:7], s[14:15]
+; GFX10-NEXT: s_cselect_b32 s8, 1, 0
+; GFX10-NEXT: s_and_b32 s8, 1, s8
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s8
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s9
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s0, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s1, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s2, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, s3, -1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v1
+; GFX10-NEXT: v_readfirstlane_b32 s1, v2
+; GFX10-NEXT: v_readfirstlane_b32 s2, v3
+; GFX10-NEXT: v_readfirstlane_b32 s3, v4
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s4, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s5, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s6, -1, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s7, -1, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s4, v0
+; GFX10-NEXT: v_readfirstlane_b32 s5, v1
+; GFX10-NEXT: v_readfirstlane_b32 s6, v2
+; GFX10-NEXT: v_readfirstlane_b32 s7, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
+
+declare i7 @llvm.uadd.sat.i7(i7, i7) #0
+declare i8 @llvm.uadd.sat.i8(i8, i8) #0
+declare <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8>, <2 x i8>) #0
+declare <4 x i8> @llvm.uadd.sat.v4i8(<4 x i8>, <4 x i8>) #0
+
+declare i16 @llvm.uadd.sat.i16(i16, i16) #0
+declare <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16>, <2 x i16>) #0
+declare <3 x i16> @llvm.uadd.sat.v3i16(<3 x i16>, <3 x i16>) #0
+declare <4 x i16> @llvm.uadd.sat.v4i16(<4 x i16>, <4 x i16>) #0
+declare <5 x i16> @llvm.uadd.sat.v5i16(<5 x i16>, <5 x i16>) #0
+declare <6 x i16> @llvm.uadd.sat.v6i16(<6 x i16>, <6 x i16>) #0
+declare <8 x i16> @llvm.uadd.sat.v8i16(<8 x i16>, <8 x i16>) #0
+
+declare i24 @llvm.uadd.sat.i24(i24, i24) #0
+
+declare i32 @llvm.uadd.sat.i32(i32, i32) #0
+declare <2 x i32> @llvm.uadd.sat.v2i32(<2 x i32>, <2 x i32>) #0
+declare <3 x i32> @llvm.uadd.sat.v3i32(<3 x i32>, <3 x i32>) #0
+declare <4 x i32> @llvm.uadd.sat.v4i32(<4 x i32>, <4 x i32>) #0
+declare <5 x i32> @llvm.uadd.sat.v5i32(<5 x i32>, <5 x i32>) #0
+declare <16 x i32> @llvm.uadd.sat.v16i32(<16 x i32>, <16 x i32>) #0
+
+declare i48 @llvm.uadd.sat.i48(i48, i48) #0
+
+declare i64 @llvm.uadd.sat.i64(i64, i64) #0
+declare <2 x i64> @llvm.uadd.sat.v2i64(<2 x i64>, <2 x i64>) #0
+
+declare i128 @llvm.uadd.sat.i128(i128, i128) #0
+declare <2 x i128> @llvm.uadd.sat.v2i128(<2 x i128>, <2 x i128>) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
new file mode 100644
index 000000000000..b111fd31851c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/usubsat.ll
@@ -0,0 +1,4440 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=tahiti -o - %s | FileCheck -check-prefix=GFX6 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=fiji -o - %s | FileCheck -check-prefix=GFX8 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 -o - %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -global-isel -mtriple=amdgcn-amd-amdpal -mcpu=gfx1010 -o - %s | FileCheck -check-prefix=GFX10 %s
+
+define i7 @v_usubsat_i7(i7 %lhs, i7 %rhs) {
+; GFX6-LABEL: v_usubsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 25, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 25, v1
+; GFX6-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 25, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX8-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_lshrrev_b16_e32 v0, 9, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 9, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 9, v1
+; GFX9-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_lshrrev_b16_e32 v0, 9, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 9, v0
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 9, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u16_e64 v1, v0, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b16_e64 v0, 9, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i7 @llvm.usub.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
+
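
The usubsat tests exercise the min/max path rather than subtract-with-overflow. A minimal C++ sketch of the clamp, and of the shift-into-high-bits trick used for the odd i7 width (illustrative only, not the patch's code):

  #include <cstdint>

  // usub.sat: clamp the subtrahend to min(a, b) so the difference
  // cannot wrap below zero; this is the v_min_u32 + v_sub_i32 pair
  // (v_min_u16 + v_sub_u16 on targets with 16-bit ops).
  static uint32_t usubsat32(uint32_t a, uint32_t b) {
    return a - (a < b ? a : b);
  }

  // The i7 case is done in the high bits of a wider register (shift by
  // 25 for a 32-bit lowering, by 9 for 16-bit) so the native unsigned
  // compare sees the correct i7 ordering, then shifted back down.
  static uint8_t usubsat7(uint8_t a, uint8_t b) {
    uint32_t ax = (uint32_t)(a & 0x7f) << 25;
    uint32_t bx = (uint32_t)(b & 0x7f) << 25;
    return (uint8_t)((ax - (ax < bx ? ax : bx)) >> 25);
  }
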
+define amdgpu_ps i7 @s_usubsat_i7(i7 inreg %lhs, i7 inreg %rhs) {
+; GFX6-LABEL: s_usubsat_i7:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 25
+; GFX6-NEXT: s_lshl_b32 s1, s1, 25
+; GFX6-NEXT: s_cmp_lt_u32 s0, s1
+; GFX6-NEXT: s_cselect_b32 s1, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 25
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_i7:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_bfe_u32 s3, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshr_b32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_i7:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_bfe_u32 s3, s0, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_lshr_b32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_i7:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 9, 0x100000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_bfe_u32 s3, s0, 0x100000
+; GFX10-NEXT: s_cmp_lt_u32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i7 @llvm.usub.sat.i7(i7 %lhs, i7 %rhs)
+ ret i7 %result
+}
+
+define i8 @v_usubsat_i8(i8 %lhs, i8 %rhs) {
+; GFX6-LABEL: v_usubsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_lshrrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_lshrrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b16_e64 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u16_e64 v1, v0, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i8 @llvm.usub.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define amdgpu_ps i8 @s_usubsat_i8(i8 inreg %lhs, i8 inreg %rhs) {
+; GFX6-LABEL: s_usubsat_i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_lt_u32 s0, s1
+; GFX6-NEXT: s_cselect_b32 s1, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 24
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s1, s2
+; GFX8-NEXT: s_lshl_b32 s0, s0, s2
+; GFX8-NEXT: s_bfe_u32 s3, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s1
+; GFX8-NEXT: s_cselect_b32 s1, s3, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshr_b32 s0, s0, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX9-NEXT: s_lshl_b32 s1, s1, s2
+; GFX9-NEXT: s_lshl_b32 s0, s0, s2
+; GFX9-NEXT: s_bfe_u32 s3, s0, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s3, s1
+; GFX9-NEXT: s_cselect_b32 s1, s3, s1
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_lshr_b32 s0, s0, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_bfe_u32 s3, s0, 0x100000
+; GFX10-NEXT: s_cmp_lt_u32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i8 @llvm.usub.sat.i8(i8 %lhs, i8 %rhs)
+ ret i8 %result
+}
+
+define i16 @v_usubsat_v2i8(i16 %lhs.arg, i16 %rhs.arg) {
+; GFX6-LABEL: v_usubsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 8, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_min_u32_e32 v2, v1, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v0, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_min_u16_e32 v1, v3, v2
+; GFX8-NEXT: v_mov_b32_e32 v2, 0xff
+; GFX8-NEXT: v_sub_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v1, v1, v2 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_min_u16_e32 v1, v2, v3
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_sub_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_and_b32_sdwa v0, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: v_lshlrev_b16_e64 v3, 8, v1
+; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v1, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b16_e64 v0, 8, v0
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_min_u16_e64 v3, v0, v3
+; GFX10-NEXT: v_sub_nc_u16_e64 v1, v2, v1
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v3
+; GFX10-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
+
+define amdgpu_ps i16 @s_usubsat_v2i8(i16 inreg %lhs.arg, i16 inreg %rhs.arg) {
+; GFX6-LABEL: s_usubsat_v2i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshr_b32 s3, s1, 8
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_lt_u32 s0, s1
+; GFX6-NEXT: s_cselect_b32 s1, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_lshr_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_cmp_lt_u32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s2, s1, s2
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: s_movk_i32 s2, 0xff
+; GFX6-NEXT: s_lshr_b32 s1, s1, 24
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v2i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshr_b32 s3, s1, 8
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_lshl_b32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s5, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s1
+; GFX8-NEXT: s_cselect_b32 s1, s5, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s2, s3, s4
+; GFX8-NEXT: s_lshr_b32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s3, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s2
+; GFX8-NEXT: s_cselect_b32 s2, s3, s2
+; GFX8-NEXT: s_sub_i32 s1, s1, s2
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_movk_i32 s2, 0xff
+; GFX8-NEXT: s_lshr_b32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s1, s1, s2
+; GFX8-NEXT: s_and_b32 s0, s0, s2
+; GFX8-NEXT: s_lshl_b32 s1, s1, s4
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v2i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s4, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshr_b32 s3, s1, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_lshl_b32 s0, s0, s4
+; GFX9-NEXT: s_bfe_u32 s5, s0, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s5, s1
+; GFX9-NEXT: s_cselect_b32 s1, s5, s1
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_lshl_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s2, s3, s4
+; GFX9-NEXT: s_lshr_b32 s0, s0, s4
+; GFX9-NEXT: s_bfe_u32 s3, s1, 0x100000
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s3, s2
+; GFX9-NEXT: s_cselect_b32 s2, s3, s2
+; GFX9-NEXT: s_sub_i32 s1, s1, s2
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_movk_i32 s2, 0xff
+; GFX9-NEXT: s_lshr_b32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s1, s1, s2
+; GFX9-NEXT: s_and_b32 s0, s0, s2
+; GFX9-NEXT: s_lshl_b32 s1, s1, s4
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v2i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, 8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s3, s0, 8
+; GFX10-NEXT: s_lshl_b32 s4, s1, s2
+; GFX10-NEXT: s_lshl_b32 s0, s0, s2
+; GFX10-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX10-NEXT: s_bfe_u32 s5, s0, 0x100000
+; GFX10-NEXT: s_lshr_b32 s1, s1, 8
+; GFX10-NEXT: s_cmp_lt_u32 s5, s4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s4, s5, s4
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_sub_i32 s0, s0, s4
+; GFX10-NEXT: s_lshl_b32 s3, s3, s2
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_bfe_u32 s4, s3, 0x100000
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s2
+; GFX10-NEXT: s_cmp_lt_u32 s4, s1
+; GFX10-NEXT: s_cselect_b32 s1, s4, s1
+; GFX10-NEXT: s_sub_i32 s1, s3, s1
+; GFX10-NEXT: s_movk_i32 s3, 0xff
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_and_b32 s0, s0, s3
+; GFX10-NEXT: s_lshr_b32 s1, s1, s2
+; GFX10-NEXT: s_and_b32 s1, s1, s3
+; GFX10-NEXT: s_lshl_b32 s1, s1, s2
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i16 %lhs.arg to <2 x i8>
+ %rhs = bitcast i16 %rhs.arg to <2 x i8>
+ %result = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> %lhs, <2 x i8> %rhs)
+ %cast.result = bitcast <2 x i8> %result to i16
+ ret i16 %cast.result
+}
+
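
For the byte vectors, the checks unpack each i8 lane, clamp it with the same min-based expansion, and repack with and/shl/or (SDWA on GFX8+). A sketch under the same caveats:

  #include <algorithm>
  #include <cstdint>

  // <2 x i8> travels in an i16: split the lanes, apply the per-lane
  // clamp, then mask each result to 8 bits and reassemble.
  static uint16_t usubsat_v2i8(uint16_t a, uint16_t b) {
    uint8_t al = (uint8_t)a,        bl = (uint8_t)b;
    uint8_t ah = (uint8_t)(a >> 8), bh = (uint8_t)(b >> 8);
    uint8_t lo = al - std::min(al, bl);
    uint8_t hi = ah - std::min(ah, bh);
    return (uint16_t)(lo | ((uint16_t)hi << 8));
  }
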
+define i32 @v_usubsat_v4i8(i32 %lhs.arg, i32 %rhs.arg) {
+; GFX6-LABEL: v_usubsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 8, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 8, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v5
+; GFX6-NEXT: v_min_u32_e32 v2, v1, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 24, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v6
+; GFX6-NEXT: v_min_u32_e32 v3, v2, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 24, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 24, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 24, v7
+; GFX6-NEXT: v_min_u32_e32 v4, v3, v4
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 24, v2
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 24, v3
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_mov_b32_e32 v2, 8
+; GFX8-NEXT: v_lshrrev_b32_sdwa v3, v2, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 24, v0
+; GFX8-NEXT: v_lshrrev_b32_sdwa v2, v2, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX8-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: v_min_u16_e32 v1, v3, v2
+; GFX8-NEXT: v_sub_u16_e32 v1, v3, v1
+; GFX8-NEXT: v_lshlrev_b16_e32 v2, 8, v4
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX8-NEXT: v_min_u16_e32 v3, v2, v3
+; GFX8-NEXT: v_sub_u16_e32 v2, v2, v3
+; GFX8-NEXT: v_lshlrev_b16_e32 v3, 8, v5
+; GFX8-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX8-NEXT: v_min_u16_e32 v4, v3, v4
+; GFX8-NEXT: v_sub_u16_e32 v3, v3, v4
+; GFX8-NEXT: v_mov_b32_e32 v4, 0xff
+; GFX8-NEXT: v_and_b32_sdwa v1, v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_and_b32_sdwa v0, v0, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v2, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: v_and_b32_sdwa v1, v3, v4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: s_mov_b32 s4, 8
+; GFX9-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_sdwa v5, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b32_e32 v3, 16, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v4, 24, v0
+; GFX9-NEXT: v_lshrrev_b32_e32 v6, 16, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b16_e32 v1, 8, v1
+; GFX9-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: v_min_u16_e32 v1, v2, v5
+; GFX9-NEXT: v_sub_u16_e32 v1, v2, v1
+; GFX9-NEXT: v_lshlrev_b16_e32 v2, 8, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v6
+; GFX9-NEXT: v_min_u16_e32 v3, v2, v3
+; GFX9-NEXT: v_sub_u16_e32 v2, v2, v3
+; GFX9-NEXT: v_lshlrev_b16_e32 v3, 8, v4
+; GFX9-NEXT: v_lshlrev_b16_e32 v4, 8, v7
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: v_min_u16_e32 v4, v3, v4
+; GFX9-NEXT: v_and_b32_sdwa v1, v1, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_lshrrev_b16_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_sub_u16_e32 v3, v3, v4
+; GFX9-NEXT: v_and_or_b32 v0, v0, s4, v1
+; GFX9-NEXT: v_and_b32_sdwa v1, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_sdwa v2, v3, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX9-NEXT: v_or3_b32 v0, v0, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: s_mov_b32 s4, 8
+; GFX10-NEXT: v_lshlrev_b16_e64 v4, 8, v0
+; GFX10-NEXT: v_lshrrev_b32_sdwa v2, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v3, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshlrev_b16_e64 v5, 8, v1
+; GFX10-NEXT: s_mov_b32 s4, 16
+; GFX10-NEXT: s_mov_b32 s5, 24
+; GFX10-NEXT: v_lshrrev_b32_sdwa v6, s4, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_u16_e64 v3, v2, v3
+; GFX10-NEXT: v_lshrrev_b32_sdwa v7, s4, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_min_u16_e64 v5, v4, v5
+; GFX10-NEXT: v_lshrrev_b32_sdwa v0, s5, v0 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_lshrrev_b32_sdwa v1, s5, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX10-NEXT: v_sub_nc_u16_e64 v2, v2, v3
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: v_min_u16_e64 v3, v6, v7
+; GFX10-NEXT: v_sub_nc_u16_e64 v4, v4, v5
+; GFX10-NEXT: v_min_u16_e64 v1, v0, v1
+; GFX10-NEXT: v_and_b32_sdwa v2, v2, s4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u16_e64 v3, v6, v3
+; GFX10-NEXT: v_lshrrev_b16_e64 v4, 8, v4
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: v_lshlrev_b32_e32 v2, 8, v2
+; GFX10-NEXT: v_and_b32_sdwa v1, v3, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_b32_sdwa v0, v0, s4 dst_sel:BYTE_3 dst_unused:UNUSED_PAD src0_sel:BYTE_1 src1_sel:DWORD
+; GFX10-NEXT: v_and_or_b32 v2, v4, s4, v2
+; GFX10-NEXT: v_or3_b32 v0, v2, v1, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
+
+define amdgpu_ps i32 @s_usubsat_v4i8(i32 inreg %lhs.arg, i32 inreg %rhs.arg) {
+; GFX6-LABEL: s_usubsat_v4i8:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshr_b32 s2, s0, 8
+; GFX6-NEXT: s_lshr_b32 s3, s0, 16
+; GFX6-NEXT: s_lshr_b32 s4, s0, 24
+; GFX6-NEXT: s_lshr_b32 s5, s1, 8
+; GFX6-NEXT: s_lshr_b32 s6, s1, 16
+; GFX6-NEXT: s_lshr_b32 s7, s1, 24
+; GFX6-NEXT: s_lshl_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_cmp_lt_u32 s0, s1
+; GFX6-NEXT: s_cselect_b32 s1, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshl_b32 s1, s2, 24
+; GFX6-NEXT: s_lshr_b32 s0, s0, 24
+; GFX6-NEXT: s_lshl_b32 s2, s5, 24
+; GFX6-NEXT: s_cmp_lt_u32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s2, s1, s2
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: s_lshl_b32 s2, s3, 24
+; GFX6-NEXT: s_lshr_b32 s1, s1, 24
+; GFX6-NEXT: s_lshl_b32 s3, s6, 24
+; GFX6-NEXT: s_cmp_lt_u32 s2, s3
+; GFX6-NEXT: s_cselect_b32 s3, s2, s3
+; GFX6-NEXT: s_sub_i32 s2, s2, s3
+; GFX6-NEXT: s_lshl_b32 s3, s4, 24
+; GFX6-NEXT: s_lshr_b32 s2, s2, 24
+; GFX6-NEXT: s_lshl_b32 s4, s7, 24
+; GFX6-NEXT: s_cmp_lt_u32 s3, s4
+; GFX6-NEXT: s_cselect_b32 s4, s3, s4
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: s_movk_i32 s4, 0xff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshr_b32 s3, s3, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s3, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 24
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v4i8:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX8-NEXT: s_lshr_b32 s2, s0, 8
+; GFX8-NEXT: s_lshr_b32 s3, s0, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 24
+; GFX8-NEXT: s_lshl_b32 s0, s0, s8
+; GFX8-NEXT: s_lshr_b32 s5, s1, 8
+; GFX8-NEXT: s_lshr_b32 s6, s1, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 24
+; GFX8-NEXT: s_lshl_b32 s1, s1, s8
+; GFX8-NEXT: s_bfe_u32 s9, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s9, s1
+; GFX8-NEXT: s_cselect_b32 s1, s9, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s2, s8
+; GFX8-NEXT: s_lshl_b32 s2, s5, s8
+; GFX8-NEXT: s_lshr_b32 s0, s0, s8
+; GFX8-NEXT: s_bfe_u32 s5, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s2
+; GFX8-NEXT: s_cselect_b32 s2, s5, s2
+; GFX8-NEXT: s_sub_i32 s1, s1, s2
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s3, s8
+; GFX8-NEXT: s_lshl_b32 s3, s6, s8
+; GFX8-NEXT: s_lshr_b32 s1, s1, s8
+; GFX8-NEXT: s_bfe_u32 s5, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s3
+; GFX8-NEXT: s_cselect_b32 s3, s5, s3
+; GFX8-NEXT: s_sub_i32 s2, s2, s3
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s4, s8
+; GFX8-NEXT: s_lshl_b32 s4, s7, s8
+; GFX8-NEXT: s_lshr_b32 s2, s2, s8
+; GFX8-NEXT: s_bfe_u32 s5, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s4
+; GFX8-NEXT: s_cselect_b32 s4, s5, s4
+; GFX8-NEXT: s_sub_i32 s3, s3, s4
+; GFX8-NEXT: s_movk_i32 s4, 0xff
+; GFX8-NEXT: s_and_b32 s1, s1, s4
+; GFX8-NEXT: s_and_b32 s0, s0, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 8
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s2, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_lshr_b32 s3, s3, s8
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: s_and_b32 s1, s3, s4
+; GFX8-NEXT: s_lshl_b32 s1, s1, 24
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v4i8:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s8, 8, 0x100000
+; GFX9-NEXT: s_lshr_b32 s2, s0, 8
+; GFX9-NEXT: s_lshr_b32 s3, s0, 16
+; GFX9-NEXT: s_lshr_b32 s4, s0, 24
+; GFX9-NEXT: s_lshl_b32 s0, s0, s8
+; GFX9-NEXT: s_lshr_b32 s5, s1, 8
+; GFX9-NEXT: s_lshr_b32 s6, s1, 16
+; GFX9-NEXT: s_lshr_b32 s7, s1, 24
+; GFX9-NEXT: s_lshl_b32 s1, s1, s8
+; GFX9-NEXT: s_bfe_u32 s9, s0, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s9, s1
+; GFX9-NEXT: s_cselect_b32 s1, s9, s1
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX9-NEXT: s_lshl_b32 s1, s2, s8
+; GFX9-NEXT: s_lshl_b32 s2, s5, s8
+; GFX9-NEXT: s_lshr_b32 s0, s0, s8
+; GFX9-NEXT: s_bfe_u32 s5, s1, 0x100000
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s5, s2
+; GFX9-NEXT: s_cselect_b32 s2, s5, s2
+; GFX9-NEXT: s_sub_i32 s1, s1, s2
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_lshl_b32 s2, s3, s8
+; GFX9-NEXT: s_lshl_b32 s3, s6, s8
+; GFX9-NEXT: s_lshr_b32 s1, s1, s8
+; GFX9-NEXT: s_bfe_u32 s5, s2, 0x100000
+; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s5, s3
+; GFX9-NEXT: s_cselect_b32 s3, s5, s3
+; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX9-NEXT: s_lshl_b32 s3, s4, s8
+; GFX9-NEXT: s_lshl_b32 s4, s7, s8
+; GFX9-NEXT: s_lshr_b32 s2, s2, s8
+; GFX9-NEXT: s_bfe_u32 s5, s3, 0x100000
+; GFX9-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s5, s4
+; GFX9-NEXT: s_cselect_b32 s4, s5, s4
+; GFX9-NEXT: s_sub_i32 s3, s3, s4
+; GFX9-NEXT: s_movk_i32 s4, 0xff
+; GFX9-NEXT: s_and_b32 s1, s1, s4
+; GFX9-NEXT: s_and_b32 s0, s0, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s2, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 16
+; GFX9-NEXT: s_lshr_b32 s3, s3, s8
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: s_and_b32 s1, s3, s4
+; GFX9-NEXT: s_lshl_b32 s1, s1, 24
+; GFX9-NEXT: s_or_b32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v4i8:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s6, 8, 0x100000
+; GFX10-NEXT: s_lshr_b32 s2, s0, 8
+; GFX10-NEXT: s_lshl_b32 s8, s1, s6
+; GFX10-NEXT: s_lshr_b32 s3, s0, 16
+; GFX10-NEXT: s_lshr_b32 s4, s0, 24
+; GFX10-NEXT: s_lshl_b32 s0, s0, s6
+; GFX10-NEXT: s_bfe_u32 s8, s8, 0x100000
+; GFX10-NEXT: s_bfe_u32 s9, s0, 0x100000
+; GFX10-NEXT: s_lshr_b32 s5, s1, 8
+; GFX10-NEXT: s_lshr_b32 s7, s1, 16
+; GFX10-NEXT: s_lshr_b32 s1, s1, 24
+; GFX10-NEXT: s_cmp_lt_u32 s9, s8
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s8, s9, s8
+; GFX10-NEXT: s_lshl_b32 s5, s5, s6
+; GFX10-NEXT: s_sub_i32 s0, s0, s8
+; GFX10-NEXT: s_lshl_b32 s2, s2, s6
+; GFX10-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX10-NEXT: s_bfe_u32 s8, s2, 0x100000
+; GFX10-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX10-NEXT: s_lshr_b32 s0, s0, s6
+; GFX10-NEXT: s_cmp_lt_u32 s8, s5
+; GFX10-NEXT: s_cselect_b32 s5, s8, s5
+; GFX10-NEXT: s_lshl_b32 s3, s3, s6
+; GFX10-NEXT: s_sub_i32 s2, s2, s5
+; GFX10-NEXT: s_lshl_b32 s5, s7, s6
+; GFX10-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX10-NEXT: s_bfe_u32 s7, s3, 0x100000
+; GFX10-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX10-NEXT: s_lshr_b32 s2, s2, s6
+; GFX10-NEXT: s_cmp_lt_u32 s7, s5
+; GFX10-NEXT: s_cselect_b32 s5, s7, s5
+; GFX10-NEXT: s_lshl_b32 s1, s1, s6
+; GFX10-NEXT: s_sub_i32 s3, s3, s5
+; GFX10-NEXT: s_lshl_b32 s4, s4, s6
+; GFX10-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX10-NEXT: s_bfe_u32 s5, s4, 0x100000
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_lshr_b32 s3, s3, s6
+; GFX10-NEXT: s_cmp_lt_u32 s5, s1
+; GFX10-NEXT: s_cselect_b32 s1, s5, s1
+; GFX10-NEXT: s_sub_i32 s1, s4, s1
+; GFX10-NEXT: s_movk_i32 s4, 0xff
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: s_and_b32 s2, s2, s4
+; GFX10-NEXT: s_lshr_b32 s1, s1, s6
+; GFX10-NEXT: s_and_b32 s0, s0, s4
+; GFX10-NEXT: s_lshl_b32 s2, s2, 8
+; GFX10-NEXT: s_and_b32 s3, s3, s4
+; GFX10-NEXT: s_and_b32 s1, s1, s4
+; GFX10-NEXT: s_or_b32 s0, s0, s2
+; GFX10-NEXT: s_lshl_b32 s2, s3, 16
+; GFX10-NEXT: s_lshl_b32 s1, s1, 24
+; GFX10-NEXT: s_or_b32 s0, s0, s2
+; GFX10-NEXT: s_or_b32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %lhs = bitcast i32 %lhs.arg to <4 x i8>
+ %rhs = bitcast i32 %rhs.arg to <4 x i8>
+ %result = call <4 x i8> @llvm.usub.sat.v4i8(<4 x i8> %lhs, <4 x i8> %rhs)
+ %cast.result = bitcast <4 x i8> %result to i32
+ ret i32 %cast.result
+}
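
(The SALU v4i8 expansion repeats one idea four times per operand: extract a byte, shift it into the top bits of the full register so the 32-bit unsigned min and subtract saturate at the i8 boundary, shift back down, and re-pack with and/or. A per-byte C++ sketch of the GFX6-style shift-by-24 form, under the assumption that the i8 value sits in the low byte; the helper name is mine:

  #include <algorithm>
  #include <cstdint>

  // GFX6 shape: s_lshl_b32 24 / cmp+cselect (min) / s_sub_i32 /
  // s_lshr_b32 24, one round per byte lane.
  uint8_t usubsat_i8(uint8_t x, uint8_t y) {
    uint32_t a = (uint32_t)x << 24;
    uint32_t b = (uint32_t)y << 24;
    return (uint8_t)((a - std::min(a, b)) >> 24);
  }
)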
+
+define i24 @v_usubsat_i24(i24 %lhs, i24 %rhs) {
+; GFX6-LABEL: v_usubsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX6-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX8-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX8-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX9-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX10-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: v_lshrrev_b32_e32 v0, 8, v0
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i24 @llvm.usub.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
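
(The i24 checks use the same narrow-type widening trick: both operands are shifted into the top 24 bits of a 32-bit register so the full-width min and subtract saturate at the right boundary, then the result is shifted back down. A C++ sketch of the computation, assuming the i24 value is carried in the low 24 bits as in the IR:

  #include <algorithm>
  #include <cstdint>

  // i24 usub.sat widened to 32 bits: shl 8, x - min(x, y), shr 8.
  uint32_t usubsat_i24(uint32_t x, uint32_t y) {
    uint32_t a = x << 8;
    uint32_t b = y << 8;
    return (a - std::min(a, b)) >> 8;
  }
)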
+
+define amdgpu_ps i24 @s_usubsat_i24(i24 inreg %lhs, i24 inreg %rhs) {
+; GFX6-LABEL: s_usubsat_i24:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 8
+; GFX6-NEXT: s_cmp_lt_u32 s0, s1
+; GFX6-NEXT: s_cselect_b32 s1, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 8
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_i24:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshl_b32 s0, s0, 8
+; GFX8-NEXT: s_lshl_b32 s1, s1, 8
+; GFX8-NEXT: s_cmp_lt_u32 s0, s1
+; GFX8-NEXT: s_cselect_b32 s1, s0, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_lshr_b32 s0, s0, 8
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_i24:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_lshl_b32 s0, s0, 8
+; GFX9-NEXT: s_lshl_b32 s1, s1, 8
+; GFX9-NEXT: s_cmp_lt_u32 s0, s1
+; GFX9-NEXT: s_cselect_b32 s1, s0, s1
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_lshr_b32 s0, s0, 8
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_i24:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_lshl_b32 s0, s0, 8
+; GFX10-NEXT: s_lshl_b32 s1, s1, 8
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s0, s1
+; GFX10-NEXT: s_cselect_b32 s1, s0, s1
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_lshr_b32 s0, s0, 8
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i24 @llvm.usub.sat.i24(i24 %lhs, i24 %rhs)
+ ret i24 %result
+}
+
+define i32 @v_usubsat_i32(i32 %lhs, i32 %rhs) {
+; GFX6-LABEL: v_usubsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i32 @llvm.usub.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
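
(The i32 case shows the min/max form of the lowering directly: unsigned saturating subtract is x - umin(x, y), which is exactly the v_min_u32 + v_sub pair in each check block. As a plain C++ reference, not the legalizer's actual helper:

  #include <algorithm>
  #include <cstdint>

  // usub.sat.i32: subtracting min(x, y) can never underflow, and
  // the result is 0 exactly when y >= x.
  uint32_t usubsat_i32(uint32_t x, uint32_t y) {
    return x - std::min(x, y);
  }
)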
+
+define amdgpu_ps i32 @s_usubsat_i32(i32 inreg %lhs, i32 inreg %rhs) {
+; GFX6-LABEL: s_usubsat_i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_lt_u32 s0, s1
+; GFX6-NEXT: s_cselect_b32 s1, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_lt_u32 s0, s1
+; GFX8-NEXT: s_cselect_b32 s1, s0, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_lt_u32 s0, s1
+; GFX9-NEXT: s_cselect_b32 s1, s0, s1
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_lt_u32 s0, s1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s1, s0, s1
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.usub.sat.i32(i32 %lhs, i32 %rhs)
+ ret i32 %result
+}
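
(On the SALU path there is no min instruction in the emitted sequence; the s_cmp_lt_u32 / s_cselect_b32 pair computes it instead, followed by s_sub_i32. Roughly, with illustrative names:

  #include <cstdint>

  // Scalar lowering shape: compare + cselect form the min,
  // then a plain subtract.
  uint32_t s_usubsat_i32(uint32_t s0, uint32_t s1) {
    uint32_t m = (s0 < s1) ? s0 : s1; // s_cmp_lt_u32 + s_cselect_b32
    return s0 - m;                    // s_sub_i32
  }
)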
+
+define amdgpu_ps float @usubsat_i32_sv(i32 inreg %lhs, i32 %rhs) {
+; GFX6-LABEL: usubsat_i32_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_min_u32_e32 v0, s0, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i32_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_min_u32_e32 v0, s0, v0
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i32_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_u32_e32 v0, s0, v0
+; GFX9-NEXT: v_sub_u32_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i32_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_min_u32_e32 v0, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.usub.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @usubsat_i32_vs(i32 %lhs, i32 inreg %rhs) {
+; GFX6-LABEL: usubsat_i32_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_min_u32_e32 v1, s0, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i32_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_min_u32_e32 v1, s0, v0
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i32_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_u32_e32 v1, s0, v0
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i32_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_min_u32_e32 v1, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i32 @llvm.usub.sat.i32(i32 %lhs, i32 %rhs)
+ %cast = bitcast i32 %result to float
+ ret float %cast
+}
+
+define <2 x i32> @v_usubsat_v2i32(<2 x i32> %lhs, <2 x i32> %rhs) {
+; GFX6-LABEL: v_usubsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_min_u32_e32 v2, v0, v2
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_min_u32_e32 v2, v1, v3
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v2, v0, v2
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v2
+; GFX8-NEXT: v_min_u32_e32 v2, v1, v3
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v2, v0, v2
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v2
+; GFX9-NEXT: v_min_u32_e32 v2, v1, v3
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_u32_e32 v2, v0, v2
+; GFX10-NEXT: v_min_u32_e32 v3, v1, v3
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v2
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
+define amdgpu_ps <2 x i32> @s_usubsat_v2i32(<2 x i32> inreg %lhs, <2 x i32> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v2i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_lt_u32 s0, s2
+; GFX6-NEXT: s_cselect_b32 s2, s0, s2
+; GFX6-NEXT: s_sub_i32 s0, s0, s2
+; GFX6-NEXT: s_cmp_lt_u32 s1, s3
+; GFX6-NEXT: s_cselect_b32 s2, s1, s3
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v2i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_lt_u32 s0, s2
+; GFX8-NEXT: s_cselect_b32 s2, s0, s2
+; GFX8-NEXT: s_sub_i32 s0, s0, s2
+; GFX8-NEXT: s_cmp_lt_u32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s2, s1, s3
+; GFX8-NEXT: s_sub_i32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v2i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_lt_u32 s0, s2
+; GFX9-NEXT: s_cselect_b32 s2, s0, s2
+; GFX9-NEXT: s_sub_i32 s0, s0, s2
+; GFX9-NEXT: s_cmp_lt_u32 s1, s3
+; GFX9-NEXT: s_cselect_b32 s2, s1, s3
+; GFX9-NEXT: s_sub_i32 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v2i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_lt_u32 s0, s2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s2, s0, s2
+; GFX10-NEXT: s_sub_i32 s0, s0, s2
+; GFX10-NEXT: s_cmp_lt_u32 s1, s3
+; GFX10-NEXT: s_cselect_b32 s2, s1, s3
+; GFX10-NEXT: s_sub_i32 s1, s1, s2
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i32> @llvm.usub.sat.v2i32(<2 x i32> %lhs, <2 x i32> %rhs)
+ ret <2 x i32> %result
+}
+
+define <3 x i32> @v_usubsat_v3i32(<3 x i32> %lhs, <3 x i32> %rhs) {
+; GFX6-LABEL: v_usubsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_min_u32_e32 v3, v0, v3
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v3
+; GFX6-NEXT: v_min_u32_e32 v3, v1, v4
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v3
+; GFX6-NEXT: v_min_u32_e32 v3, v2, v5
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v3, v0, v3
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v3
+; GFX8-NEXT: v_min_u32_e32 v3, v1, v4
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v3
+; GFX8-NEXT: v_min_u32_e32 v3, v2, v5
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v3, v0, v3
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v3
+; GFX9-NEXT: v_min_u32_e32 v3, v1, v4
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v3
+; GFX9-NEXT: v_min_u32_e32 v3, v2, v5
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_u32_e32 v3, v0, v3
+; GFX10-NEXT: v_min_u32_e32 v4, v1, v4
+; GFX10-NEXT: v_min_u32_e32 v5, v2, v5
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v3
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v4
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <3 x i32> @llvm.usub.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define amdgpu_ps <3 x i32> @s_usubsat_v3i32(<3 x i32> inreg %lhs, <3 x i32> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v3i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_lt_u32 s0, s3
+; GFX6-NEXT: s_cselect_b32 s3, s0, s3
+; GFX6-NEXT: s_sub_i32 s0, s0, s3
+; GFX6-NEXT: s_cmp_lt_u32 s1, s4
+; GFX6-NEXT: s_cselect_b32 s3, s1, s4
+; GFX6-NEXT: s_sub_i32 s1, s1, s3
+; GFX6-NEXT: s_cmp_lt_u32 s2, s5
+; GFX6-NEXT: s_cselect_b32 s3, s2, s5
+; GFX6-NEXT: s_sub_i32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v3i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_lt_u32 s0, s3
+; GFX8-NEXT: s_cselect_b32 s3, s0, s3
+; GFX8-NEXT: s_sub_i32 s0, s0, s3
+; GFX8-NEXT: s_cmp_lt_u32 s1, s4
+; GFX8-NEXT: s_cselect_b32 s3, s1, s4
+; GFX8-NEXT: s_sub_i32 s1, s1, s3
+; GFX8-NEXT: s_cmp_lt_u32 s2, s5
+; GFX8-NEXT: s_cselect_b32 s3, s2, s5
+; GFX8-NEXT: s_sub_i32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v3i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_lt_u32 s0, s3
+; GFX9-NEXT: s_cselect_b32 s3, s0, s3
+; GFX9-NEXT: s_sub_i32 s0, s0, s3
+; GFX9-NEXT: s_cmp_lt_u32 s1, s4
+; GFX9-NEXT: s_cselect_b32 s3, s1, s4
+; GFX9-NEXT: s_sub_i32 s1, s1, s3
+; GFX9-NEXT: s_cmp_lt_u32 s2, s5
+; GFX9-NEXT: s_cselect_b32 s3, s2, s5
+; GFX9-NEXT: s_sub_i32 s2, s2, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v3i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_lt_u32 s0, s3
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s3, s0, s3
+; GFX10-NEXT: s_sub_i32 s0, s0, s3
+; GFX10-NEXT: s_cmp_lt_u32 s1, s4
+; GFX10-NEXT: s_cselect_b32 s3, s1, s4
+; GFX10-NEXT: s_sub_i32 s1, s1, s3
+; GFX10-NEXT: s_cmp_lt_u32 s2, s5
+; GFX10-NEXT: s_cselect_b32 s3, s2, s5
+; GFX10-NEXT: s_sub_i32 s2, s2, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <3 x i32> @llvm.usub.sat.v3i32(<3 x i32> %lhs, <3 x i32> %rhs)
+ ret <3 x i32> %result
+}
+
+define <4 x i32> @v_usubsat_v4i32(<4 x i32> %lhs, <4 x i32> %rhs) {
+; GFX6-LABEL: v_usubsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_min_u32_e32 v4, v0, v4
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_min_u32_e32 v4, v1, v5
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_min_u32_e32 v4, v2, v6
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_min_u32_e32 v4, v3, v7
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v4, v0, v4
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v4
+; GFX8-NEXT: v_min_u32_e32 v4, v1, v5
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v4
+; GFX8-NEXT: v_min_u32_e32 v4, v2, v6
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v4
+; GFX8-NEXT: v_min_u32_e32 v4, v3, v7
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v4, v0, v4
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v4
+; GFX9-NEXT: v_min_u32_e32 v4, v1, v5
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v4
+; GFX9-NEXT: v_min_u32_e32 v4, v2, v6
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v4
+; GFX9-NEXT: v_min_u32_e32 v4, v3, v7
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_u32_e32 v19, v2, v6
+; GFX10-NEXT: v_min_u32_e32 v11, v0, v4
+; GFX10-NEXT: v_min_u32_e32 v15, v1, v5
+; GFX10-NEXT: v_min_u32_e32 v6, v3, v7
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v19
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v11
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v15
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
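
(The vector i32 cases are scalarized lanewise: each lane repeats the min-then-sub pattern with a scratch register for the min, which is why the GFX10 output can interleave the lanes freely. A C++ sketch of the lanewise expansion, helper name mine:

  #include <algorithm>
  #include <cstdint>

  // One min + sub per lane; no cross-lane dependencies.
  void usubsat_v4i32(uint32_t out[4], const uint32_t l[4],
                     const uint32_t r[4]) {
    for (int i = 0; i < 4; ++i)
      out[i] = l[i] - std::min(l[i], r[i]);
  }
)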
+
+define amdgpu_ps <4 x i32> @s_usubsat_v4i32(<4 x i32> inreg %lhs, <4 x i32> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v4i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_lt_u32 s0, s4
+; GFX6-NEXT: s_cselect_b32 s4, s0, s4
+; GFX6-NEXT: s_sub_i32 s0, s0, s4
+; GFX6-NEXT: s_cmp_lt_u32 s1, s5
+; GFX6-NEXT: s_cselect_b32 s4, s1, s5
+; GFX6-NEXT: s_sub_i32 s1, s1, s4
+; GFX6-NEXT: s_cmp_lt_u32 s2, s6
+; GFX6-NEXT: s_cselect_b32 s4, s2, s6
+; GFX6-NEXT: s_sub_i32 s2, s2, s4
+; GFX6-NEXT: s_cmp_lt_u32 s3, s7
+; GFX6-NEXT: s_cselect_b32 s4, s3, s7
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v4i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_lt_u32 s0, s4
+; GFX8-NEXT: s_cselect_b32 s4, s0, s4
+; GFX8-NEXT: s_sub_i32 s0, s0, s4
+; GFX8-NEXT: s_cmp_lt_u32 s1, s5
+; GFX8-NEXT: s_cselect_b32 s4, s1, s5
+; GFX8-NEXT: s_sub_i32 s1, s1, s4
+; GFX8-NEXT: s_cmp_lt_u32 s2, s6
+; GFX8-NEXT: s_cselect_b32 s4, s2, s6
+; GFX8-NEXT: s_sub_i32 s2, s2, s4
+; GFX8-NEXT: s_cmp_lt_u32 s3, s7
+; GFX8-NEXT: s_cselect_b32 s4, s3, s7
+; GFX8-NEXT: s_sub_i32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v4i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_lt_u32 s0, s4
+; GFX9-NEXT: s_cselect_b32 s4, s0, s4
+; GFX9-NEXT: s_sub_i32 s0, s0, s4
+; GFX9-NEXT: s_cmp_lt_u32 s1, s5
+; GFX9-NEXT: s_cselect_b32 s4, s1, s5
+; GFX9-NEXT: s_sub_i32 s1, s1, s4
+; GFX9-NEXT: s_cmp_lt_u32 s2, s6
+; GFX9-NEXT: s_cselect_b32 s4, s2, s6
+; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_cmp_lt_u32 s3, s7
+; GFX9-NEXT: s_cselect_b32 s4, s3, s7
+; GFX9-NEXT: s_sub_i32 s3, s3, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v4i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_lt_u32 s0, s4
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s4, s0, s4
+; GFX10-NEXT: s_sub_i32 s0, s0, s4
+; GFX10-NEXT: s_cmp_lt_u32 s1, s5
+; GFX10-NEXT: s_cselect_b32 s4, s1, s5
+; GFX10-NEXT: s_sub_i32 s1, s1, s4
+; GFX10-NEXT: s_cmp_lt_u32 s2, s6
+; GFX10-NEXT: s_cselect_b32 s4, s2, s6
+; GFX10-NEXT: s_sub_i32 s2, s2, s4
+; GFX10-NEXT: s_cmp_lt_u32 s3, s7
+; GFX10-NEXT: s_cselect_b32 s4, s3, s7
+; GFX10-NEXT: s_sub_i32 s3, s3, s4
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i32> @llvm.usub.sat.v4i32(<4 x i32> %lhs, <4 x i32> %rhs)
+ ret <4 x i32> %result
+}
+
+define <5 x i32> @v_usubsat_v5i32(<5 x i32> %lhs, <5 x i32> %rhs) {
+; GFX6-LABEL: v_usubsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_min_u32_e32 v5, v0, v5
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v5
+; GFX6-NEXT: v_min_u32_e32 v5, v1, v6
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v5
+; GFX6-NEXT: v_min_u32_e32 v5, v2, v7
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v5
+; GFX6-NEXT: v_min_u32_e32 v5, v3, v8
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v5
+; GFX6-NEXT: v_min_u32_e32 v5, v4, v9
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v5
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v5, v0, v5
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v5
+; GFX8-NEXT: v_min_u32_e32 v5, v1, v6
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v5
+; GFX8-NEXT: v_min_u32_e32 v5, v2, v7
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v5
+; GFX8-NEXT: v_min_u32_e32 v5, v3, v8
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, v3, v5
+; GFX8-NEXT: v_min_u32_e32 v5, v4, v9
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v4, v5
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v5, v0, v5
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v5
+; GFX9-NEXT: v_min_u32_e32 v5, v1, v6
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v5
+; GFX9-NEXT: v_min_u32_e32 v5, v2, v7
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v5
+; GFX9-NEXT: v_min_u32_e32 v5, v3, v8
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v5
+; GFX9-NEXT: v_min_u32_e32 v5, v4, v9
+; GFX9-NEXT: v_sub_u32_e32 v4, v4, v5
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_u32_e32 v5, v0, v5
+; GFX10-NEXT: v_min_u32_e32 v6, v1, v6
+; GFX10-NEXT: v_min_u32_e32 v7, v2, v7
+; GFX10-NEXT: v_min_u32_e32 v8, v3, v8
+; GFX10-NEXT: v_min_u32_e32 v9, v4, v9
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v5
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v6
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v7
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, v3, v8
+; GFX10-NEXT: v_sub_nc_u32_e32 v4, v4, v9
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <5 x i32> @llvm.usub.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
+
+define amdgpu_ps <5 x i32> @s_usubsat_v5i32(<5 x i32> inreg %lhs, <5 x i32> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v5i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_lt_u32 s0, s5
+; GFX6-NEXT: s_cselect_b32 s5, s0, s5
+; GFX6-NEXT: s_sub_i32 s0, s0, s5
+; GFX6-NEXT: s_cmp_lt_u32 s1, s6
+; GFX6-NEXT: s_cselect_b32 s5, s1, s6
+; GFX6-NEXT: s_sub_i32 s1, s1, s5
+; GFX6-NEXT: s_cmp_lt_u32 s2, s7
+; GFX6-NEXT: s_cselect_b32 s5, s2, s7
+; GFX6-NEXT: s_sub_i32 s2, s2, s5
+; GFX6-NEXT: s_cmp_lt_u32 s3, s8
+; GFX6-NEXT: s_cselect_b32 s5, s3, s8
+; GFX6-NEXT: s_sub_i32 s3, s3, s5
+; GFX6-NEXT: s_cmp_lt_u32 s4, s9
+; GFX6-NEXT: s_cselect_b32 s5, s4, s9
+; GFX6-NEXT: s_sub_i32 s4, s4, s5
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v5i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_lt_u32 s0, s5
+; GFX8-NEXT: s_cselect_b32 s5, s0, s5
+; GFX8-NEXT: s_sub_i32 s0, s0, s5
+; GFX8-NEXT: s_cmp_lt_u32 s1, s6
+; GFX8-NEXT: s_cselect_b32 s5, s1, s6
+; GFX8-NEXT: s_sub_i32 s1, s1, s5
+; GFX8-NEXT: s_cmp_lt_u32 s2, s7
+; GFX8-NEXT: s_cselect_b32 s5, s2, s7
+; GFX8-NEXT: s_sub_i32 s2, s2, s5
+; GFX8-NEXT: s_cmp_lt_u32 s3, s8
+; GFX8-NEXT: s_cselect_b32 s5, s3, s8
+; GFX8-NEXT: s_sub_i32 s3, s3, s5
+; GFX8-NEXT: s_cmp_lt_u32 s4, s9
+; GFX8-NEXT: s_cselect_b32 s5, s4, s9
+; GFX8-NEXT: s_sub_i32 s4, s4, s5
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v5i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_lt_u32 s0, s5
+; GFX9-NEXT: s_cselect_b32 s5, s0, s5
+; GFX9-NEXT: s_sub_i32 s0, s0, s5
+; GFX9-NEXT: s_cmp_lt_u32 s1, s6
+; GFX9-NEXT: s_cselect_b32 s5, s1, s6
+; GFX9-NEXT: s_sub_i32 s1, s1, s5
+; GFX9-NEXT: s_cmp_lt_u32 s2, s7
+; GFX9-NEXT: s_cselect_b32 s5, s2, s7
+; GFX9-NEXT: s_sub_i32 s2, s2, s5
+; GFX9-NEXT: s_cmp_lt_u32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s5, s3, s8
+; GFX9-NEXT: s_sub_i32 s3, s3, s5
+; GFX9-NEXT: s_cmp_lt_u32 s4, s9
+; GFX9-NEXT: s_cselect_b32 s5, s4, s9
+; GFX9-NEXT: s_sub_i32 s4, s4, s5
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v5i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_lt_u32 s0, s5
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s5, s0, s5
+; GFX10-NEXT: s_sub_i32 s0, s0, s5
+; GFX10-NEXT: s_cmp_lt_u32 s1, s6
+; GFX10-NEXT: s_cselect_b32 s5, s1, s6
+; GFX10-NEXT: s_sub_i32 s1, s1, s5
+; GFX10-NEXT: s_cmp_lt_u32 s2, s7
+; GFX10-NEXT: s_cselect_b32 s5, s2, s7
+; GFX10-NEXT: s_sub_i32 s2, s2, s5
+; GFX10-NEXT: s_cmp_lt_u32 s3, s8
+; GFX10-NEXT: s_cselect_b32 s5, s3, s8
+; GFX10-NEXT: s_sub_i32 s3, s3, s5
+; GFX10-NEXT: s_cmp_lt_u32 s4, s9
+; GFX10-NEXT: s_cselect_b32 s5, s4, s9
+; GFX10-NEXT: s_sub_i32 s4, s4, s5
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <5 x i32> @llvm.usub.sat.v5i32(<5 x i32> %lhs, <5 x i32> %rhs)
+ ret <5 x i32> %result
+}
+
+define <16 x i32> @v_usubsat_v16i32(<16 x i32> %lhs, <16 x i32> %rhs) {
+; GFX6-LABEL: v_usubsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_min_u32_e32 v16, v0, v16
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v1, v17
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v2, v18
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v3, v19
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v4, v20
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v5, v21
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v6, v22
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v7, v23
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v8, v24
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v8, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v9, v25
+; GFX6-NEXT: v_sub_i32_e32 v9, vcc, v9, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v10, v26
+; GFX6-NEXT: v_sub_i32_e32 v10, vcc, v10, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v11, v27
+; GFX6-NEXT: v_sub_i32_e32 v11, vcc, v11, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v12, v28
+; GFX6-NEXT: v_sub_i32_e32 v12, vcc, v12, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v13, v29
+; GFX6-NEXT: v_sub_i32_e32 v13, vcc, v13, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v14, v30
+; GFX6-NEXT: v_sub_i32_e32 v14, vcc, v14, v16
+; GFX6-NEXT: v_min_u32_e32 v16, v15, v31
+; GFX6-NEXT: v_sub_i32_e32 v15, vcc, v15, v16
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u32_e32 v16, v0, v16
+; GFX8-NEXT: v_sub_u32_e32 v0, vcc, v0, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v1, v17
+; GFX8-NEXT: v_sub_u32_e32 v1, vcc, v1, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v2, v18
+; GFX8-NEXT: v_sub_u32_e32 v2, vcc, v2, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v3, v19
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, v3, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v4, v20
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v4, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v5, v21
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, v5, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v6, v22
+; GFX8-NEXT: v_sub_u32_e32 v6, vcc, v6, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v7, v23
+; GFX8-NEXT: v_sub_u32_e32 v7, vcc, v7, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v8, v24
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, v8, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v9, v25
+; GFX8-NEXT: v_sub_u32_e32 v9, vcc, v9, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v10, v26
+; GFX8-NEXT: v_sub_u32_e32 v10, vcc, v10, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v11, v27
+; GFX8-NEXT: v_sub_u32_e32 v11, vcc, v11, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v12, v28
+; GFX8-NEXT: v_sub_u32_e32 v12, vcc, v12, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v13, v29
+; GFX8-NEXT: v_sub_u32_e32 v13, vcc, v13, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v14, v30
+; GFX8-NEXT: v_sub_u32_e32 v14, vcc, v14, v16
+; GFX8-NEXT: v_min_u32_e32 v16, v15, v31
+; GFX8-NEXT: v_sub_u32_e32 v15, vcc, v15, v16
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u32_e32 v16, v0, v16
+; GFX9-NEXT: v_sub_u32_e32 v0, v0, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v1, v17
+; GFX9-NEXT: v_sub_u32_e32 v1, v1, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v2, v18
+; GFX9-NEXT: v_sub_u32_e32 v2, v2, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v3, v19
+; GFX9-NEXT: v_sub_u32_e32 v3, v3, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v4, v20
+; GFX9-NEXT: v_sub_u32_e32 v4, v4, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v5, v21
+; GFX9-NEXT: v_sub_u32_e32 v5, v5, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v6, v22
+; GFX9-NEXT: v_sub_u32_e32 v6, v6, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v7, v23
+; GFX9-NEXT: v_sub_u32_e32 v7, v7, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v8, v24
+; GFX9-NEXT: v_sub_u32_e32 v8, v8, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v9, v25
+; GFX9-NEXT: v_sub_u32_e32 v9, v9, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v10, v26
+; GFX9-NEXT: v_sub_u32_e32 v10, v10, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v11, v27
+; GFX9-NEXT: v_sub_u32_e32 v11, v11, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v12, v28
+; GFX9-NEXT: v_sub_u32_e32 v12, v12, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v13, v29
+; GFX9-NEXT: v_sub_u32_e32 v13, v13, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v14, v30
+; GFX9-NEXT: v_sub_u32_e32 v14, v14, v16
+; GFX9-NEXT: v_min_u32_e32 v16, v15, v31
+; GFX9-NEXT: v_sub_u32_e32 v15, v15, v16
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_u32_e32 v35, v0, v16
+; GFX10-NEXT: v_min_u32_e32 v16, v1, v17
+; GFX10-NEXT: v_min_u32_e32 v17, v2, v18
+; GFX10-NEXT: v_min_u32_e32 v18, v3, v19
+; GFX10-NEXT: v_min_u32_e32 v19, v4, v20
+; GFX10-NEXT: v_min_u32_e32 v20, v5, v21
+; GFX10-NEXT: v_sub_nc_u32_e32 v1, v1, v16
+; GFX10-NEXT: v_min_u32_e32 v16, v6, v22
+; GFX10-NEXT: v_sub_nc_u32_e32 v2, v2, v17
+; GFX10-NEXT: v_min_u32_e32 v17, v7, v23
+; GFX10-NEXT: v_sub_nc_u32_e32 v3, v3, v18
+; GFX10-NEXT: v_min_u32_e32 v18, v8, v24
+; GFX10-NEXT: v_sub_nc_u32_e32 v4, v4, v19
+; GFX10-NEXT: v_min_u32_e32 v19, v9, v25
+; GFX10-NEXT: v_sub_nc_u32_e32 v5, v5, v20
+; GFX10-NEXT: v_min_u32_e32 v20, v10, v26
+; GFX10-NEXT: v_sub_nc_u32_e32 v6, v6, v16
+; GFX10-NEXT: v_min_u32_e32 v16, v11, v27
+; GFX10-NEXT: v_sub_nc_u32_e32 v7, v7, v17
+; GFX10-NEXT: v_min_u32_e32 v17, v12, v28
+; GFX10-NEXT: v_sub_nc_u32_e32 v8, v8, v18
+; GFX10-NEXT: v_min_u32_e32 v18, v13, v29
+; GFX10-NEXT: v_sub_nc_u32_e32 v9, v9, v19
+; GFX10-NEXT: v_min_u32_e32 v19, v14, v30
+; GFX10-NEXT: v_sub_nc_u32_e32 v10, v10, v20
+; GFX10-NEXT: v_min_u32_e32 v20, v15, v31
+; GFX10-NEXT: v_sub_nc_u32_e32 v0, v0, v35
+; GFX10-NEXT: v_sub_nc_u32_e32 v11, v11, v16
+; GFX10-NEXT: v_sub_nc_u32_e32 v12, v12, v17
+; GFX10-NEXT: v_sub_nc_u32_e32 v13, v13, v18
+; GFX10-NEXT: v_sub_nc_u32_e32 v14, v14, v19
+; GFX10-NEXT: v_sub_nc_u32_e32 v15, v15, v20
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
+
+define amdgpu_ps <16 x i32> @s_usubsat_v16i32(<16 x i32> inreg %lhs, <16 x i32> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v16i32:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_cmp_lt_u32 s0, s16
+; GFX6-NEXT: s_cselect_b32 s16, s0, s16
+; GFX6-NEXT: s_sub_i32 s0, s0, s16
+; GFX6-NEXT: s_cmp_lt_u32 s1, s17
+; GFX6-NEXT: s_cselect_b32 s16, s1, s17
+; GFX6-NEXT: s_sub_i32 s1, s1, s16
+; GFX6-NEXT: s_cmp_lt_u32 s2, s18
+; GFX6-NEXT: s_cselect_b32 s16, s2, s18
+; GFX6-NEXT: s_sub_i32 s2, s2, s16
+; GFX6-NEXT: s_cmp_lt_u32 s3, s19
+; GFX6-NEXT: s_cselect_b32 s16, s3, s19
+; GFX6-NEXT: s_sub_i32 s3, s3, s16
+; GFX6-NEXT: s_cmp_lt_u32 s4, s20
+; GFX6-NEXT: s_cselect_b32 s16, s4, s20
+; GFX6-NEXT: s_sub_i32 s4, s4, s16
+; GFX6-NEXT: s_cmp_lt_u32 s5, s21
+; GFX6-NEXT: s_cselect_b32 s16, s5, s21
+; GFX6-NEXT: s_sub_i32 s5, s5, s16
+; GFX6-NEXT: s_cmp_lt_u32 s6, s22
+; GFX6-NEXT: s_cselect_b32 s16, s6, s22
+; GFX6-NEXT: s_sub_i32 s6, s6, s16
+; GFX6-NEXT: s_cmp_lt_u32 s7, s23
+; GFX6-NEXT: s_cselect_b32 s16, s7, s23
+; GFX6-NEXT: s_sub_i32 s7, s7, s16
+; GFX6-NEXT: s_cmp_lt_u32 s8, s24
+; GFX6-NEXT: s_cselect_b32 s16, s8, s24
+; GFX6-NEXT: s_sub_i32 s8, s8, s16
+; GFX6-NEXT: s_cmp_lt_u32 s9, s25
+; GFX6-NEXT: s_cselect_b32 s16, s9, s25
+; GFX6-NEXT: s_sub_i32 s9, s9, s16
+; GFX6-NEXT: s_cmp_lt_u32 s10, s26
+; GFX6-NEXT: s_cselect_b32 s16, s10, s26
+; GFX6-NEXT: s_sub_i32 s10, s10, s16
+; GFX6-NEXT: s_cmp_lt_u32 s11, s27
+; GFX6-NEXT: s_cselect_b32 s16, s11, s27
+; GFX6-NEXT: s_sub_i32 s11, s11, s16
+; GFX6-NEXT: s_cmp_lt_u32 s12, s28
+; GFX6-NEXT: s_cselect_b32 s16, s12, s28
+; GFX6-NEXT: s_sub_i32 s12, s12, s16
+; GFX6-NEXT: s_cmp_lt_u32 s13, s29
+; GFX6-NEXT: s_cselect_b32 s16, s13, s29
+; GFX6-NEXT: s_sub_i32 s13, s13, s16
+; GFX6-NEXT: s_cmp_lt_u32 s14, s30
+; GFX6-NEXT: s_cselect_b32 s16, s14, s30
+; GFX6-NEXT: s_sub_i32 s14, s14, s16
+; GFX6-NEXT: s_cmp_lt_u32 s15, s31
+; GFX6-NEXT: s_cselect_b32 s16, s15, s31
+; GFX6-NEXT: s_sub_i32 s15, s15, s16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v16i32:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_cmp_lt_u32 s0, s16
+; GFX8-NEXT: s_cselect_b32 s16, s0, s16
+; GFX8-NEXT: s_sub_i32 s0, s0, s16
+; GFX8-NEXT: s_cmp_lt_u32 s1, s17
+; GFX8-NEXT: s_cselect_b32 s16, s1, s17
+; GFX8-NEXT: s_sub_i32 s1, s1, s16
+; GFX8-NEXT: s_cmp_lt_u32 s2, s18
+; GFX8-NEXT: s_cselect_b32 s16, s2, s18
+; GFX8-NEXT: s_sub_i32 s2, s2, s16
+; GFX8-NEXT: s_cmp_lt_u32 s3, s19
+; GFX8-NEXT: s_cselect_b32 s16, s3, s19
+; GFX8-NEXT: s_sub_i32 s3, s3, s16
+; GFX8-NEXT: s_cmp_lt_u32 s4, s20
+; GFX8-NEXT: s_cselect_b32 s16, s4, s20
+; GFX8-NEXT: s_sub_i32 s4, s4, s16
+; GFX8-NEXT: s_cmp_lt_u32 s5, s21
+; GFX8-NEXT: s_cselect_b32 s16, s5, s21
+; GFX8-NEXT: s_sub_i32 s5, s5, s16
+; GFX8-NEXT: s_cmp_lt_u32 s6, s22
+; GFX8-NEXT: s_cselect_b32 s16, s6, s22
+; GFX8-NEXT: s_sub_i32 s6, s6, s16
+; GFX8-NEXT: s_cmp_lt_u32 s7, s23
+; GFX8-NEXT: s_cselect_b32 s16, s7, s23
+; GFX8-NEXT: s_sub_i32 s7, s7, s16
+; GFX8-NEXT: s_cmp_lt_u32 s8, s24
+; GFX8-NEXT: s_cselect_b32 s16, s8, s24
+; GFX8-NEXT: s_sub_i32 s8, s8, s16
+; GFX8-NEXT: s_cmp_lt_u32 s9, s25
+; GFX8-NEXT: s_cselect_b32 s16, s9, s25
+; GFX8-NEXT: s_sub_i32 s9, s9, s16
+; GFX8-NEXT: s_cmp_lt_u32 s10, s26
+; GFX8-NEXT: s_cselect_b32 s16, s10, s26
+; GFX8-NEXT: s_sub_i32 s10, s10, s16
+; GFX8-NEXT: s_cmp_lt_u32 s11, s27
+; GFX8-NEXT: s_cselect_b32 s16, s11, s27
+; GFX8-NEXT: s_sub_i32 s11, s11, s16
+; GFX8-NEXT: s_cmp_lt_u32 s12, s28
+; GFX8-NEXT: s_cselect_b32 s16, s12, s28
+; GFX8-NEXT: s_sub_i32 s12, s12, s16
+; GFX8-NEXT: s_cmp_lt_u32 s13, s29
+; GFX8-NEXT: s_cselect_b32 s16, s13, s29
+; GFX8-NEXT: s_sub_i32 s13, s13, s16
+; GFX8-NEXT: s_cmp_lt_u32 s14, s30
+; GFX8-NEXT: s_cselect_b32 s16, s14, s30
+; GFX8-NEXT: s_sub_i32 s14, s14, s16
+; GFX8-NEXT: s_cmp_lt_u32 s15, s31
+; GFX8-NEXT: s_cselect_b32 s16, s15, s31
+; GFX8-NEXT: s_sub_i32 s15, s15, s16
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v16i32:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_cmp_lt_u32 s0, s16
+; GFX9-NEXT: s_cselect_b32 s16, s0, s16
+; GFX9-NEXT: s_sub_i32 s0, s0, s16
+; GFX9-NEXT: s_cmp_lt_u32 s1, s17
+; GFX9-NEXT: s_cselect_b32 s16, s1, s17
+; GFX9-NEXT: s_sub_i32 s1, s1, s16
+; GFX9-NEXT: s_cmp_lt_u32 s2, s18
+; GFX9-NEXT: s_cselect_b32 s16, s2, s18
+; GFX9-NEXT: s_sub_i32 s2, s2, s16
+; GFX9-NEXT: s_cmp_lt_u32 s3, s19
+; GFX9-NEXT: s_cselect_b32 s16, s3, s19
+; GFX9-NEXT: s_sub_i32 s3, s3, s16
+; GFX9-NEXT: s_cmp_lt_u32 s4, s20
+; GFX9-NEXT: s_cselect_b32 s16, s4, s20
+; GFX9-NEXT: s_sub_i32 s4, s4, s16
+; GFX9-NEXT: s_cmp_lt_u32 s5, s21
+; GFX9-NEXT: s_cselect_b32 s16, s5, s21
+; GFX9-NEXT: s_sub_i32 s5, s5, s16
+; GFX9-NEXT: s_cmp_lt_u32 s6, s22
+; GFX9-NEXT: s_cselect_b32 s16, s6, s22
+; GFX9-NEXT: s_sub_i32 s6, s6, s16
+; GFX9-NEXT: s_cmp_lt_u32 s7, s23
+; GFX9-NEXT: s_cselect_b32 s16, s7, s23
+; GFX9-NEXT: s_sub_i32 s7, s7, s16
+; GFX9-NEXT: s_cmp_lt_u32 s8, s24
+; GFX9-NEXT: s_cselect_b32 s16, s8, s24
+; GFX9-NEXT: s_sub_i32 s8, s8, s16
+; GFX9-NEXT: s_cmp_lt_u32 s9, s25
+; GFX9-NEXT: s_cselect_b32 s16, s9, s25
+; GFX9-NEXT: s_sub_i32 s9, s9, s16
+; GFX9-NEXT: s_cmp_lt_u32 s10, s26
+; GFX9-NEXT: s_cselect_b32 s16, s10, s26
+; GFX9-NEXT: s_sub_i32 s10, s10, s16
+; GFX9-NEXT: s_cmp_lt_u32 s11, s27
+; GFX9-NEXT: s_cselect_b32 s16, s11, s27
+; GFX9-NEXT: s_sub_i32 s11, s11, s16
+; GFX9-NEXT: s_cmp_lt_u32 s12, s28
+; GFX9-NEXT: s_cselect_b32 s16, s12, s28
+; GFX9-NEXT: s_sub_i32 s12, s12, s16
+; GFX9-NEXT: s_cmp_lt_u32 s13, s29
+; GFX9-NEXT: s_cselect_b32 s16, s13, s29
+; GFX9-NEXT: s_sub_i32 s13, s13, s16
+; GFX9-NEXT: s_cmp_lt_u32 s14, s30
+; GFX9-NEXT: s_cselect_b32 s16, s14, s30
+; GFX9-NEXT: s_sub_i32 s14, s14, s16
+; GFX9-NEXT: s_cmp_lt_u32 s15, s31
+; GFX9-NEXT: s_cselect_b32 s16, s15, s31
+; GFX9-NEXT: s_sub_i32 s15, s15, s16
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v16i32:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_cmp_lt_u32 s0, s16
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s46, s0, s16
+; GFX10-NEXT: s_sub_i32 s0, s0, s46
+; GFX10-NEXT: s_cmp_lt_u32 s1, s17
+; GFX10-NEXT: s_cselect_b32 s46, s1, s17
+; GFX10-NEXT: s_sub_i32 s1, s1, s46
+; GFX10-NEXT: s_cmp_lt_u32 s2, s18
+; GFX10-NEXT: s_cselect_b32 s16, s2, s18
+; GFX10-NEXT: s_sub_i32 s2, s2, s16
+; GFX10-NEXT: s_cmp_lt_u32 s3, s19
+; GFX10-NEXT: s_cselect_b32 s16, s3, s19
+; GFX10-NEXT: s_sub_i32 s3, s3, s16
+; GFX10-NEXT: s_cmp_lt_u32 s4, s20
+; GFX10-NEXT: s_cselect_b32 s16, s4, s20
+; GFX10-NEXT: s_sub_i32 s4, s4, s16
+; GFX10-NEXT: s_cmp_lt_u32 s5, s21
+; GFX10-NEXT: s_cselect_b32 s16, s5, s21
+; GFX10-NEXT: s_sub_i32 s5, s5, s16
+; GFX10-NEXT: s_cmp_lt_u32 s6, s22
+; GFX10-NEXT: s_cselect_b32 s16, s6, s22
+; GFX10-NEXT: s_sub_i32 s6, s6, s16
+; GFX10-NEXT: s_cmp_lt_u32 s7, s23
+; GFX10-NEXT: s_cselect_b32 s16, s7, s23
+; GFX10-NEXT: s_sub_i32 s7, s7, s16
+; GFX10-NEXT: s_cmp_lt_u32 s8, s24
+; GFX10-NEXT: s_cselect_b32 s16, s8, s24
+; GFX10-NEXT: s_sub_i32 s8, s8, s16
+; GFX10-NEXT: s_cmp_lt_u32 s9, s25
+; GFX10-NEXT: s_cselect_b32 s16, s9, s25
+; GFX10-NEXT: s_sub_i32 s9, s9, s16
+; GFX10-NEXT: s_cmp_lt_u32 s10, s26
+; GFX10-NEXT: s_cselect_b32 s16, s10, s26
+; GFX10-NEXT: s_sub_i32 s10, s10, s16
+; GFX10-NEXT: s_cmp_lt_u32 s11, s27
+; GFX10-NEXT: s_cselect_b32 s16, s11, s27
+; GFX10-NEXT: s_sub_i32 s11, s11, s16
+; GFX10-NEXT: s_cmp_lt_u32 s12, s28
+; GFX10-NEXT: s_cselect_b32 s16, s12, s28
+; GFX10-NEXT: s_sub_i32 s12, s12, s16
+; GFX10-NEXT: s_cmp_lt_u32 s13, s29
+; GFX10-NEXT: s_cselect_b32 s16, s13, s29
+; GFX10-NEXT: s_sub_i32 s13, s13, s16
+; GFX10-NEXT: s_cmp_lt_u32 s14, s30
+; GFX10-NEXT: s_cselect_b32 s16, s14, s30
+; GFX10-NEXT: s_sub_i32 s14, s14, s16
+; GFX10-NEXT: s_cmp_lt_u32 s15, s31
+; GFX10-NEXT: s_cselect_b32 s16, s15, s31
+; GFX10-NEXT: s_sub_i32 s15, s15, s16
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <16 x i32> @llvm.usub.sat.v16i32(<16 x i32> %lhs, <16 x i32> %rhs)
+ ret <16 x i32> %result
+}
+
+define i16 @v_usubsat_i16(i16 %lhs, i16 %rhs) {
+; GFX6-LABEL: v_usubsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_min_u32_e32 v1, v0, v1
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_min_u16_e32 v1, v0, v1
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_min_u16_e64 v1, v0, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i16 @llvm.usub.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
+
+define amdgpu_ps i16 @s_usubsat_i16(i16 inreg %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: s_usubsat_i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_cmp_lt_u32 s0, s1
+; GFX6-NEXT: s_cselect_b32 s1, s0, s1
+; GFX6-NEXT: s_sub_i32 s0, s0, s1
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_bfe_u32 s2, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s2, s1
+; GFX8-NEXT: s_cselect_b32 s1, s2, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_bfe_u32 s2, s0, 0x100000
+; GFX9-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX9-NEXT: s_cmp_lt_u32 s2, s1
+; GFX9-NEXT: s_cselect_b32 s1, s2, s1
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_bfe_u32 s2, s0, 0x100000
+; GFX10-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lt_u32 s2, s1
+; GFX10-NEXT: s_cselect_b32 s1, s2, s1
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.usub.sat.i16(i16 %lhs, i16 %rhs)
+ ret i16 %result
+}
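
(For i16 on GFX8+ the scalar checks use s_bfe_u32 with the literal 0x100000, which encodes offset 0 / width 16, i.e. a zero-extend of the low 16 bits before the unsigned compare. The computation itself is the usual one:

  #include <algorithm>
  #include <cstdint>

  // i16 usub.sat: zero-extend (the s_bfe_u32 0x100000 pattern),
  // take the unsigned min, then subtract.
  uint16_t usubsat_i16(uint16_t x, uint16_t y) {
    return (uint16_t)(x - std::min(x, y));
  }
)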
+
+define amdgpu_ps half @usubsat_i16_sv(i16 inreg %lhs, i16 %rhs) {
+; GFX6-LABEL: usubsat_i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_min_u32_e32 v0, s0, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_min_u16_e32 v0, s0, v0
+; GFX8-NEXT: v_sub_u16_e32 v0, s0, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_u16_e32 v0, s0, v0
+; GFX9-NEXT: v_sub_u16_e32 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_min_u16_e64 v0, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.usub.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define amdgpu_ps half @usubsat_i16_vs(i16 %lhs, i16 inreg %rhs) {
+; GFX6-LABEL: usubsat_i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_min_u32_e32 v1, s0, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_min_u16_e32 v1, s0, v0
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_min_u16_e32 v1, s0, v0
+; GFX9-NEXT: v_sub_u16_e32 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_min_u16_e64 v1, v0, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_nc_u16_e64 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i16 @llvm.usub.sat.i16(i16 %lhs, i16 %rhs)
+ %cast = bitcast i16 %result to half
+ ret half %cast
+}
+
+define <2 x i16> @v_usubsat_v2i16(<2 x i16> %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: v_usubsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_min_u32_e32 v2, v0, v2
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v3
+; GFX6-NEXT: v_min_u32_e32 v2, v1, v2
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX8-NEXT: v_min_u16_e32 v3, v0, v1
+; GFX8-NEXT: v_min_u16_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v3
+; GFX8-NEXT: v_sub_u16_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_u16 v1, v0, v1
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_pk_min_u16 v1, v0, v1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ ret <2 x i16> %result
+}
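
(GFX9 and GFX10 keep <2 x i16> packed: v_pk_min_u16 and v_pk_sub_i16 apply the same min-then-sub identity to both halves of one 32-bit register, while GFX8 splits the halves with SDWA and re-ORs them. A packed-scalar C++ sketch, helper name mine:

  #include <algorithm>
  #include <cstdint>

  // Both 16-bit lanes of a packed 32-bit value, handled lanewise.
  uint32_t usubsat_v2i16(uint32_t x, uint32_t y) {
    uint16_t xl = (uint16_t)x,         yl = (uint16_t)y;
    uint16_t xh = (uint16_t)(x >> 16), yh = (uint16_t)(y >> 16);
    uint16_t lo = (uint16_t)(xl - std::min(xl, yl));
    uint16_t hi = (uint16_t)(xh - std::min(xh, yh));
    return (uint32_t)lo | ((uint32_t)hi << 16);
  }
)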
+
+define amdgpu_ps i32 @s_usubsat_v2i16(<2 x i16> inreg %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v2i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_cmp_lt_u32 s0, s2
+; GFX6-NEXT: s_cselect_b32 s2, s0, s2
+; GFX6-NEXT: s_sub_i32 s0, s0, s2
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s3, 16
+; GFX6-NEXT: s_cmp_lt_u32 s1, s2
+; GFX6-NEXT: s_cselect_b32 s2, s1, s2
+; GFX6-NEXT: s_sub_i32 s1, s1, s2
+; GFX6-NEXT: s_mov_b32 s2, 0xffff
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_and_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s0, s0, s2
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v2i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s3, s1, 16
+; GFX8-NEXT: s_lshr_b32 s2, s0, 16
+; GFX8-NEXT: s_bfe_u32 s4, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s1
+; GFX8-NEXT: s_cselect_b32 s1, s4, s1
+; GFX8-NEXT: s_sub_i32 s0, s0, s1
+; GFX8-NEXT: s_bfe_u32 s1, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s1, s3
+; GFX8-NEXT: s_cselect_b32 s1, s1, s3
+; GFX8-NEXT: s_sub_i32 s1, s2, s1
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s1, s1, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v2i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s3, 0xffff
+; GFX9-NEXT: s_lshr_b32 s5, s1, 16
+; GFX9-NEXT: s_lshr_b32 s2, s0, 16
+; GFX9-NEXT: s_and_b32 s4, s0, s3
+; GFX9-NEXT: s_and_b32 s1, s1, s3
+; GFX9-NEXT: s_cmp_lt_u32 s4, s1
+; GFX9-NEXT: s_cselect_b32 s1, s4, s1
+; GFX9-NEXT: s_cmp_lt_u32 s2, s5
+; GFX9-NEXT: s_cselect_b32 s3, s2, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s1
+; GFX9-NEXT: s_sub_i32 s1, s2, s3
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v2i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mov_b32 s2, 0xffff
+; GFX10-NEXT: s_lshr_b32 s3, s0, 16
+; GFX10-NEXT: s_and_b32 s4, s0, s2
+; GFX10-NEXT: s_and_b32 s2, s1, s2
+; GFX10-NEXT: s_lshr_b32 s1, s1, 16
+; GFX10-NEXT: s_cmp_lt_u32 s4, s2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s2, s4, s2
+; GFX10-NEXT: s_cmp_lt_u32 s3, s1
+; GFX10-NEXT: s_cselect_b32 s1, s3, s1
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s2, s1
+; GFX10-NEXT: s_lshr_b32 s2, s1, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s1
+; GFX10-NEXT: s_sub_i32 s1, s3, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to i32
+ ret i32 %cast
+}
+
+define amdgpu_ps float @usubsat_v2i16_sv(<2 x i16> inreg %lhs, <2 x i16> %rhs) {
+; GFX6-LABEL: usubsat_v2i16_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_min_u32_e32 v0, s0, v0
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, s0, v0
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_min_u32_e32 v1, s0, v1
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, s0, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_v2i16_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_min_u16_e32 v1, s0, v0
+; GFX8-NEXT: v_min_u16_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v1, s0, v1
+; GFX8-NEXT: v_sub_u16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v1, v0
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_v2i16_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_pk_min_u16 v0, s0, v0
+; GFX9-NEXT: v_pk_sub_i16 v0, s0, v0
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_v2i16_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_pk_min_u16 v0, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v0, s0, v0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
+
+define amdgpu_ps float @usubsat_v2i16_vs(<2 x i16> %lhs, <2 x i16> inreg %rhs) {
+; GFX6-LABEL: usubsat_v2i16_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: v_min_u32_e32 v2, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_lshl_b32 s0, s1, 16
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v2
+; GFX6-NEXT: v_min_u32_e32 v2, s0, v1
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s0, 0xffff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s0, v1
+; GFX6-NEXT: v_and_b32_e32 v0, s0, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_v2i16_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX8-NEXT: s_lshr_b32 s1, s0, 16
+; GFX8-NEXT: v_min_u16_e32 v2, s0, v0
+; GFX8-NEXT: v_min_u16_e32 v3, s1, v1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v2
+; GFX8-NEXT: v_sub_u16_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_v2i16_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_pk_min_u16 v1, v0, s0
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_v2i16_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_pk_min_u16 v1, v0, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> %lhs, <2 x i16> %rhs)
+ %cast = bitcast <2 x i16> %result to float
+ ret float %cast
+}
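+
+; A rough IR-level sketch of the min/sub expansion exercised by the v2i16
+; tests above (value names are illustrative only, not checked by FileCheck):
+;   %c = icmp ult <2 x i16> %lhs, %rhs
+;   %m = select <2 x i1> %c, <2 x i16> %lhs, <2 x i16> %rhs
+;   %r = sub <2 x i16> %lhs, %m
+; i.e. usub.sat(x, y) == x - umin(x, y), which maps onto v_pk_min_u16 +
+; v_pk_sub_i16 on targets with packed 16-bit ALU ops.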
+
+; FIXME: v3i16 insert/extract
+; define <3 x i16> @v_usubsat_v3i16(<3 x i16> %lhs, <3 x i16> %rhs) {
+; %result = call <3 x i16> @llvm.usub.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+; define amdgpu_ps <3 x i16> @s_usubsat_v3i16(<3 x i16> inreg %lhs, <3 x i16> inreg %rhs) {
+; %result = call <3 x i16> @llvm.usub.sat.v3i16(<3 x i16> %lhs, <3 x i16> %rhs)
+; ret <3 x i16> %result
+; }
+
+define <2 x float> @v_usubsat_v4i16(<4 x i16> %lhs, <4 x i16> %rhs) {
+; GFX6-LABEL: v_usubsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_min_u32_e32 v4, v0, v4
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v5
+; GFX6-NEXT: v_min_u32_e32 v4, v1, v4
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v6
+; GFX6-NEXT: v_min_u32_e32 v4, v2, v4
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v7
+; GFX6-NEXT: v_min_u32_e32 v4, v3, v4
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v4
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v4, 16, v0
+; GFX8-NEXT: v_min_u16_e32 v6, v0, v2
+; GFX8-NEXT: v_min_u16_sdwa v2, v4, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v5, 16, v1
+; GFX8-NEXT: v_min_u16_e32 v7, v1, v3
+; GFX8-NEXT: v_min_u16_sdwa v3, v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v6
+; GFX8-NEXT: v_sub_u16_sdwa v2, v4, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v2
+; GFX8-NEXT: v_sub_u16_e32 v1, v1, v7
+; GFX8-NEXT: v_sub_u16_sdwa v2, v5, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_u16 v2, v0, v2
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v2
+; GFX9-NEXT: v_pk_min_u16 v2, v1, v3
+; GFX9-NEXT: v_pk_sub_i16 v1, v1, v2
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_pk_min_u16 v2, v0, v2
+; GFX10-NEXT: v_pk_min_u16 v3, v1, v3
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v2
+; GFX10-NEXT: v_pk_sub_i16 v1, v1, v3
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x i32> @s_usubsat_v4i16(<4 x i16> inreg %lhs, <4 x i16> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v4i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_cmp_lt_u32 s0, s4
+; GFX6-NEXT: s_cselect_b32 s4, s0, s4
+; GFX6-NEXT: s_sub_i32 s0, s0, s4
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s4, s5, 16
+; GFX6-NEXT: s_cmp_lt_u32 s1, s4
+; GFX6-NEXT: s_cselect_b32 s4, s1, s4
+; GFX6-NEXT: s_sub_i32 s1, s1, s4
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s4, s6, 16
+; GFX6-NEXT: s_cmp_lt_u32 s2, s4
+; GFX6-NEXT: s_cselect_b32 s4, s2, s4
+; GFX6-NEXT: s_sub_i32 s2, s2, s4
+; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s7, 16
+; GFX6-NEXT: s_cmp_lt_u32 s3, s4
+; GFX6-NEXT: s_cselect_b32 s4, s3, s4
+; GFX6-NEXT: s_sub_i32 s3, s3, s4
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s4
+; GFX6-NEXT: s_lshr_b32 s3, s3, 16
+; GFX6-NEXT: s_and_b32 s0, s0, s4
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s4
+; GFX6-NEXT: s_and_b32 s2, s3, s4
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v4i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s6, s2, 16
+; GFX8-NEXT: s_lshr_b32 s4, s0, 16
+; GFX8-NEXT: s_lshr_b32 s5, s1, 16
+; GFX8-NEXT: s_lshr_b32 s7, s3, 16
+; GFX8-NEXT: s_bfe_u32 s8, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s8, s2
+; GFX8-NEXT: s_cselect_b32 s2, s8, s2
+; GFX8-NEXT: s_sub_i32 s0, s0, s2
+; GFX8-NEXT: s_bfe_u32 s2, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s2, s6
+; GFX8-NEXT: s_cselect_b32 s2, s2, s6
+; GFX8-NEXT: s_sub_i32 s2, s4, s2
+; GFX8-NEXT: s_bfe_u32 s4, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s3
+; GFX8-NEXT: s_cselect_b32 s3, s4, s3
+; GFX8-NEXT: s_sub_i32 s1, s1, s3
+; GFX8-NEXT: s_bfe_u32 s3, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s7, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s4
+; GFX8-NEXT: s_cselect_b32 s3, s3, s4
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_sub_i32 s3, s5, s3
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s2
+; GFX8-NEXT: s_bfe_u32 s2, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s2, s2, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s2
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v4i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s5, 0xffff
+; GFX9-NEXT: s_lshr_b32 s7, s2, 16
+; GFX9-NEXT: s_lshr_b32 s4, s0, 16
+; GFX9-NEXT: s_and_b32 s6, s0, s5
+; GFX9-NEXT: s_and_b32 s2, s2, s5
+; GFX9-NEXT: s_cmp_lt_u32 s6, s2
+; GFX9-NEXT: s_cselect_b32 s2, s6, s2
+; GFX9-NEXT: s_cmp_lt_u32 s4, s7
+; GFX9-NEXT: s_cselect_b32 s6, s4, s7
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s6
+; GFX9-NEXT: s_lshr_b32 s6, s2, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s2
+; GFX9-NEXT: s_sub_i32 s2, s4, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX9-NEXT: s_lshr_b32 s6, s3, 16
+; GFX9-NEXT: s_lshr_b32 s2, s1, 16
+; GFX9-NEXT: s_and_b32 s4, s1, s5
+; GFX9-NEXT: s_and_b32 s3, s3, s5
+; GFX9-NEXT: s_cmp_lt_u32 s4, s3
+; GFX9-NEXT: s_cselect_b32 s3, s4, s3
+; GFX9-NEXT: s_cmp_lt_u32 s2, s6
+; GFX9-NEXT: s_cselect_b32 s4, s2, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: s_lshr_b32 s4, s3, 16
+; GFX9-NEXT: s_sub_i32 s1, s1, s3
+; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s2
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v4i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mov_b32 s4, 0xffff
+; GFX10-NEXT: s_lshr_b32 s5, s0, 16
+; GFX10-NEXT: s_and_b32 s7, s2, s4
+; GFX10-NEXT: s_and_b32 s6, s0, s4
+; GFX10-NEXT: s_lshr_b32 s2, s2, 16
+; GFX10-NEXT: s_cmp_lt_u32 s6, s7
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s6, s6, s7
+; GFX10-NEXT: s_cmp_lt_u32 s5, s2
+; GFX10-NEXT: s_cselect_b32 s2, s5, s2
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s6, s2
+; GFX10-NEXT: s_lshr_b32 s6, s2, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s2
+; GFX10-NEXT: s_sub_i32 s2, s5, s6
+; GFX10-NEXT: s_and_b32 s6, s1, s4
+; GFX10-NEXT: s_and_b32 s4, s3, s4
+; GFX10-NEXT: s_lshr_b32 s5, s1, 16
+; GFX10-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-NEXT: s_cmp_lt_u32 s6, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s2
+; GFX10-NEXT: s_cselect_b32 s4, s6, s4
+; GFX10-NEXT: s_cmp_lt_u32 s5, s3
+; GFX10-NEXT: s_cselect_b32 s3, s5, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s4, s3
+; GFX10-NEXT: s_lshr_b32 s4, s3, 16
+; GFX10-NEXT: s_sub_i32 s1, s1, s3
+; GFX10-NEXT: s_sub_i32 s3, s5, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <4 x i16> @llvm.usub.sat.v4i16(<4 x i16> %lhs, <4 x i16> %rhs)
+ %cast = bitcast <4 x i16> %result to <2 x i32>
+ ret <2 x i32> %cast
+}
+
+; FIXME: v5i16 not yet handled
+; define <5 x i16> @v_usubsat_v5i16(<5 x i16> %lhs, <5 x i16> %rhs) {
+; %result = call <5 x i16> @llvm.usub.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+; define amdgpu_ps <5 x i16> @s_usubsat_v5i16(<5 x i16> inreg %lhs, <5 x i16> inreg %rhs) {
+; %result = call <5 x i16> @llvm.usub.sat.v5i16(<5 x i16> %lhs, <5 x i16> %rhs)
+; ret <5 x i16> %result
+; }
+
+define <3 x float> @v_usubsat_v6i16(<6 x i16> %lhs, <6 x i16> %rhs) {
+; GFX6-LABEL: v_usubsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_min_u32_e32 v6, v0, v6
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; GFX6-NEXT: v_min_u32_e32 v6, v1, v6
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v8
+; GFX6-NEXT: v_min_u32_e32 v6, v2, v6
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v9
+; GFX6-NEXT: v_min_u32_e32 v6, v3, v6
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v10
+; GFX6-NEXT: v_min_u32_e32 v6, v4, v6
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v6
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v11
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_min_u32_e32 v6, v5, v6
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v6
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v6, 16, v0
+; GFX8-NEXT: v_min_u16_e32 v9, v0, v3
+; GFX8-NEXT: v_min_u16_sdwa v3, v6, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v7, 16, v1
+; GFX8-NEXT: v_min_u16_e32 v10, v1, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v2
+; GFX8-NEXT: v_min_u16_sdwa v4, v7, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v9
+; GFX8-NEXT: v_sub_u16_sdwa v3, v6, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_e32 v11, v2, v5
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v3
+; GFX8-NEXT: v_min_u16_sdwa v5, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v1, v1, v10
+; GFX8-NEXT: v_sub_u16_sdwa v3, v7, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v3
+; GFX8-NEXT: v_sub_u16_e32 v2, v2, v11
+; GFX8-NEXT: v_sub_u16_sdwa v3, v8, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_u16 v3, v0, v3
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v3
+; GFX9-NEXT: v_pk_min_u16 v3, v1, v4
+; GFX9-NEXT: v_pk_sub_i16 v1, v1, v3
+; GFX9-NEXT: v_pk_min_u16 v3, v2, v5
+; GFX9-NEXT: v_pk_sub_i16 v2, v2, v3
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_pk_min_u16 v3, v0, v3
+; GFX10-NEXT: v_pk_min_u16 v4, v1, v4
+; GFX10-NEXT: v_pk_min_u16 v5, v2, v5
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v3
+; GFX10-NEXT: v_pk_sub_i16 v1, v1, v4
+; GFX10-NEXT: v_pk_sub_i16 v2, v2, v5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <6 x i16> @llvm.usub.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x float>
+ ret <3 x float> %cast
+}
+
+define amdgpu_ps <3 x i32> @s_usubsat_v6i16(<6 x i16> inreg %lhs, <6 x i16> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v6i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_cmp_lt_u32 s0, s6
+; GFX6-NEXT: s_cselect_b32 s6, s0, s6
+; GFX6-NEXT: s_sub_i32 s0, s0, s6
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s6, s7, 16
+; GFX6-NEXT: s_cmp_lt_u32 s1, s6
+; GFX6-NEXT: s_cselect_b32 s6, s1, s6
+; GFX6-NEXT: s_sub_i32 s1, s1, s6
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s6, s8, 16
+; GFX6-NEXT: s_cmp_lt_u32 s2, s6
+; GFX6-NEXT: s_cselect_b32 s6, s2, s6
+; GFX6-NEXT: s_sub_i32 s2, s2, s6
+; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s6, s9, 16
+; GFX6-NEXT: s_cmp_lt_u32 s3, s6
+; GFX6-NEXT: s_cselect_b32 s6, s3, s6
+; GFX6-NEXT: s_sub_i32 s3, s3, s6
+; GFX6-NEXT: s_lshr_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s6, s10, 16
+; GFX6-NEXT: s_cmp_lt_u32 s4, s6
+; GFX6-NEXT: s_cselect_b32 s6, s4, s6
+; GFX6-NEXT: s_sub_i32 s4, s4, s6
+; GFX6-NEXT: s_lshr_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s6, s11, 16
+; GFX6-NEXT: s_cmp_lt_u32 s5, s6
+; GFX6-NEXT: s_cselect_b32 s6, s5, s6
+; GFX6-NEXT: s_sub_i32 s5, s5, s6
+; GFX6-NEXT: s_mov_b32 s6, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s6
+; GFX6-NEXT: s_and_b32 s0, s0, s6
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s6
+; GFX6-NEXT: s_and_b32 s2, s3, s6
+; GFX6-NEXT: s_lshr_b32 s5, s5, 16
+; GFX6-NEXT: s_and_b32 s3, s5, s6
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s6
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v6i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s9, s3, 16
+; GFX8-NEXT: s_lshr_b32 s6, s0, 16
+; GFX8-NEXT: s_lshr_b32 s7, s1, 16
+; GFX8-NEXT: s_lshr_b32 s8, s2, 16
+; GFX8-NEXT: s_lshr_b32 s10, s4, 16
+; GFX8-NEXT: s_lshr_b32 s11, s5, 16
+; GFX8-NEXT: s_bfe_u32 s12, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s12, s3
+; GFX8-NEXT: s_cselect_b32 s3, s12, s3
+; GFX8-NEXT: s_sub_i32 s0, s0, s3
+; GFX8-NEXT: s_bfe_u32 s3, s6, 0x100000
+; GFX8-NEXT: s_bfe_u32 s9, s9, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s3, s9
+; GFX8-NEXT: s_cselect_b32 s3, s3, s9
+; GFX8-NEXT: s_sub_i32 s3, s6, s3
+; GFX8-NEXT: s_bfe_u32 s6, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s6, s4
+; GFX8-NEXT: s_cselect_b32 s4, s6, s4
+; GFX8-NEXT: s_sub_i32 s1, s1, s4
+; GFX8-NEXT: s_bfe_u32 s4, s7, 0x100000
+; GFX8-NEXT: s_bfe_u32 s6, s10, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s6
+; GFX8-NEXT: s_cselect_b32 s4, s4, s6
+; GFX8-NEXT: s_sub_i32 s4, s7, s4
+; GFX8-NEXT: s_bfe_u32 s6, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s6, s5
+; GFX8-NEXT: s_cselect_b32 s5, s6, s5
+; GFX8-NEXT: s_sub_i32 s2, s2, s5
+; GFX8-NEXT: s_bfe_u32 s5, s8, 0x100000
+; GFX8-NEXT: s_bfe_u32 s6, s11, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s6
+; GFX8-NEXT: s_cselect_b32 s5, s5, s6
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s3
+; GFX8-NEXT: s_bfe_u32 s3, s4, 0x100000
+; GFX8-NEXT: s_sub_i32 s5, s8, s5
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s3
+; GFX8-NEXT: s_bfe_u32 s3, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s3, s3, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v6i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s7, 0xffff
+; GFX9-NEXT: s_lshr_b32 s9, s3, 16
+; GFX9-NEXT: s_lshr_b32 s6, s0, 16
+; GFX9-NEXT: s_and_b32 s8, s0, s7
+; GFX9-NEXT: s_and_b32 s3, s3, s7
+; GFX9-NEXT: s_cmp_lt_u32 s8, s3
+; GFX9-NEXT: s_cselect_b32 s3, s8, s3
+; GFX9-NEXT: s_cmp_lt_u32 s6, s9
+; GFX9-NEXT: s_cselect_b32 s8, s6, s9
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s8
+; GFX9-NEXT: s_lshr_b32 s8, s3, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s3
+; GFX9-NEXT: s_sub_i32 s3, s6, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s3
+; GFX9-NEXT: s_lshr_b32 s8, s4, 16
+; GFX9-NEXT: s_lshr_b32 s3, s1, 16
+; GFX9-NEXT: s_and_b32 s6, s1, s7
+; GFX9-NEXT: s_and_b32 s4, s4, s7
+; GFX9-NEXT: s_cmp_lt_u32 s6, s4
+; GFX9-NEXT: s_cselect_b32 s4, s6, s4
+; GFX9-NEXT: s_cmp_lt_u32 s3, s8
+; GFX9-NEXT: s_cselect_b32 s6, s3, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s6
+; GFX9-NEXT: s_lshr_b32 s6, s4, 16
+; GFX9-NEXT: s_sub_i32 s1, s1, s4
+; GFX9-NEXT: s_sub_i32 s3, s3, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s3
+; GFX9-NEXT: s_lshr_b32 s6, s5, 16
+; GFX9-NEXT: s_lshr_b32 s3, s2, 16
+; GFX9-NEXT: s_and_b32 s4, s2, s7
+; GFX9-NEXT: s_and_b32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_u32 s4, s5
+; GFX9-NEXT: s_cselect_b32 s4, s4, s5
+; GFX9-NEXT: s_cmp_lt_u32 s3, s6
+; GFX9-NEXT: s_cselect_b32 s5, s3, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX9-NEXT: s_lshr_b32 s5, s4, 16
+; GFX9-NEXT: s_sub_i32 s2, s2, s4
+; GFX9-NEXT: s_sub_i32 s3, s3, s5
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v6i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mov_b32 s6, 0xffff
+; GFX10-NEXT: s_lshr_b32 s7, s0, 16
+; GFX10-NEXT: s_and_b32 s9, s3, s6
+; GFX10-NEXT: s_and_b32 s8, s0, s6
+; GFX10-NEXT: s_lshr_b32 s3, s3, 16
+; GFX10-NEXT: s_cmp_lt_u32 s8, s9
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s8, s8, s9
+; GFX10-NEXT: s_cmp_lt_u32 s7, s3
+; GFX10-NEXT: s_cselect_b32 s3, s7, s3
+; GFX10-NEXT: s_and_b32 s9, s4, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s8, s3
+; GFX10-NEXT: s_lshr_b32 s4, s4, 16
+; GFX10-NEXT: s_lshr_b32 s8, s3, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s3
+; GFX10-NEXT: s_sub_i32 s3, s7, s8
+; GFX10-NEXT: s_and_b32 s8, s1, s6
+; GFX10-NEXT: s_lshr_b32 s7, s1, 16
+; GFX10-NEXT: s_cmp_lt_u32 s8, s9
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s3
+; GFX10-NEXT: s_cselect_b32 s8, s8, s9
+; GFX10-NEXT: s_cmp_lt_u32 s7, s4
+; GFX10-NEXT: s_cselect_b32 s4, s7, s4
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s8, s4
+; GFX10-NEXT: s_lshr_b32 s8, s4, 16
+; GFX10-NEXT: s_sub_i32 s1, s1, s4
+; GFX10-NEXT: s_sub_i32 s4, s7, s8
+; GFX10-NEXT: s_and_b32 s8, s2, s6
+; GFX10-NEXT: s_and_b32 s6, s5, s6
+; GFX10-NEXT: s_lshr_b32 s7, s2, 16
+; GFX10-NEXT: s_lshr_b32 s5, s5, 16
+; GFX10-NEXT: s_cmp_lt_u32 s8, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX10-NEXT: s_cselect_b32 s6, s8, s6
+; GFX10-NEXT: s_cmp_lt_u32 s7, s5
+; GFX10-NEXT: s_cselect_b32 s5, s7, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s6, s5
+; GFX10-NEXT: s_lshr_b32 s3, s5, 16
+; GFX10-NEXT: s_sub_i32 s2, s2, s5
+; GFX10-NEXT: s_sub_i32 s3, s7, s3
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <6 x i16> @llvm.usub.sat.v6i16(<6 x i16> %lhs, <6 x i16> %rhs)
+ %cast = bitcast <6 x i16> %result to <3 x i32>
+ ret <3 x i32> %cast
+}
+
+define <4 x float> @v_usubsat_v8i16(<8 x i16> %lhs, <8 x i16> %rhs) {
+; GFX6-LABEL: v_usubsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; GFX6-NEXT: v_min_u32_e32 v8, v0, v8
+; GFX6-NEXT: v_sub_i32_e32 v0, vcc, v0, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v9
+; GFX6-NEXT: v_min_u32_e32 v8, v1, v8
+; GFX6-NEXT: v_sub_i32_e32 v1, vcc, v1, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v10
+; GFX6-NEXT: v_min_u32_e32 v8, v2, v8
+; GFX6-NEXT: v_sub_i32_e32 v2, vcc, v2, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v11
+; GFX6-NEXT: v_min_u32_e32 v8, v3, v8
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, v3, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v12
+; GFX6-NEXT: v_min_u32_e32 v8, v4, v8
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v4, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v13
+; GFX6-NEXT: v_min_u32_e32 v8, v5, v8
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, v5, v8
+; GFX6-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v14
+; GFX6-NEXT: v_min_u32_e32 v8, v6, v8
+; GFX6-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: s_mov_b32 s4, 0xffff
+; GFX6-NEXT: v_sub_i32_e32 v6, vcc, v6, v8
+; GFX6-NEXT: v_lshrrev_b32_e32 v0, 16, v0
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX6-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v8, 16, v15
+; GFX6-NEXT: v_min_u32_e32 v8, v7, v8
+; GFX6-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_and_b32_e32 v0, s4, v0
+; GFX6-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX6-NEXT: v_sub_i32_e32 v7, vcc, v7, v8
+; GFX6-NEXT: v_or_b32_e32 v0, v0, v1
+; GFX6-NEXT: v_and_b32_e32 v1, s4, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v5, 16, v5
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v5
+; GFX6-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX6-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_lshrrev_b32_e32 v7, 16, v7
+; GFX6-NEXT: v_or_b32_e32 v1, v1, v2
+; GFX6-NEXT: v_and_b32_e32 v2, s4, v4
+; GFX6-NEXT: v_and_b32_e32 v4, s4, v7
+; GFX6-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX6-NEXT: v_lshrrev_b32_e32 v6, 16, v6
+; GFX6-NEXT: v_or_b32_e32 v2, v2, v3
+; GFX6-NEXT: v_and_b32_e32 v3, s4, v6
+; GFX6-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; GFX6-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; GFX8-NEXT: v_min_u16_e32 v12, v0, v4
+; GFX8-NEXT: v_min_u16_sdwa v4, v8, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_lshrrev_b32_e32 v9, 16, v1
+; GFX8-NEXT: v_min_u16_e32 v13, v1, v5
+; GFX8-NEXT: v_lshrrev_b32_e32 v10, 16, v2
+; GFX8-NEXT: v_min_u16_sdwa v5, v9, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v0, v0, v12
+; GFX8-NEXT: v_sub_u16_sdwa v4, v8, v4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_e32 v14, v2, v6
+; GFX8-NEXT: v_or_b32_e32 v0, v0, v4
+; GFX8-NEXT: v_lshrrev_b32_e32 v11, 16, v3
+; GFX8-NEXT: v_min_u16_sdwa v6, v10, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v1, v1, v13
+; GFX8-NEXT: v_sub_u16_sdwa v4, v9, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_min_u16_e32 v15, v3, v7
+; GFX8-NEXT: v_or_b32_e32 v1, v1, v4
+; GFX8-NEXT: v_min_u16_sdwa v7, v11, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; GFX8-NEXT: v_sub_u16_e32 v2, v2, v14
+; GFX8-NEXT: v_sub_u16_sdwa v4, v10, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v2, v2, v4
+; GFX8-NEXT: v_sub_u16_e32 v3, v3, v15
+; GFX8-NEXT: v_sub_u16_sdwa v4, v11, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX8-NEXT: v_or_b32_e32 v3, v3, v4
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_pk_min_u16 v4, v0, v4
+; GFX9-NEXT: v_pk_sub_i16 v0, v0, v4
+; GFX9-NEXT: v_pk_min_u16 v4, v1, v5
+; GFX9-NEXT: v_pk_sub_i16 v1, v1, v4
+; GFX9-NEXT: v_pk_min_u16 v4, v2, v6
+; GFX9-NEXT: v_pk_sub_i16 v2, v2, v4
+; GFX9-NEXT: v_pk_min_u16 v4, v3, v7
+; GFX9-NEXT: v_pk_sub_i16 v3, v3, v4
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_pk_min_u16 v19, v2, v6
+; GFX10-NEXT: v_pk_min_u16 v11, v0, v4
+; GFX10-NEXT: v_pk_min_u16 v15, v1, v5
+; GFX10-NEXT: v_pk_min_u16 v6, v3, v7
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_pk_sub_i16 v2, v2, v19
+; GFX10-NEXT: v_pk_sub_i16 v0, v0, v11
+; GFX10-NEXT: v_pk_sub_i16 v1, v1, v15
+; GFX10-NEXT: v_pk_sub_i16 v3, v3, v6
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x i32> @s_usubsat_v8i16(<8 x i16> inreg %lhs, <8 x i16> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v8i16:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_lshl_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s8, s8, 16
+; GFX6-NEXT: s_cmp_lt_u32 s0, s8
+; GFX6-NEXT: s_cselect_b32 s8, s0, s8
+; GFX6-NEXT: s_sub_i32 s0, s0, s8
+; GFX6-NEXT: s_lshr_b32 s0, s0, 16
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s8, s9, 16
+; GFX6-NEXT: s_cmp_lt_u32 s1, s8
+; GFX6-NEXT: s_cselect_b32 s8, s1, s8
+; GFX6-NEXT: s_sub_i32 s1, s1, s8
+; GFX6-NEXT: s_lshr_b32 s1, s1, 16
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s8, s10, 16
+; GFX6-NEXT: s_cmp_lt_u32 s2, s8
+; GFX6-NEXT: s_cselect_b32 s8, s2, s8
+; GFX6-NEXT: s_sub_i32 s2, s2, s8
+; GFX6-NEXT: s_lshr_b32 s2, s2, 16
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s8, s11, 16
+; GFX6-NEXT: s_cmp_lt_u32 s3, s8
+; GFX6-NEXT: s_cselect_b32 s8, s3, s8
+; GFX6-NEXT: s_sub_i32 s3, s3, s8
+; GFX6-NEXT: s_lshr_b32 s3, s3, 16
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s8, s12, 16
+; GFX6-NEXT: s_cmp_lt_u32 s4, s8
+; GFX6-NEXT: s_cselect_b32 s8, s4, s8
+; GFX6-NEXT: s_sub_i32 s4, s4, s8
+; GFX6-NEXT: s_lshr_b32 s4, s4, 16
+; GFX6-NEXT: s_lshl_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s8, s13, 16
+; GFX6-NEXT: s_cmp_lt_u32 s5, s8
+; GFX6-NEXT: s_cselect_b32 s8, s5, s8
+; GFX6-NEXT: s_sub_i32 s5, s5, s8
+; GFX6-NEXT: s_lshr_b32 s5, s5, 16
+; GFX6-NEXT: s_lshl_b32 s6, s6, 16
+; GFX6-NEXT: s_lshl_b32 s8, s14, 16
+; GFX6-NEXT: s_cmp_lt_u32 s6, s8
+; GFX6-NEXT: s_cselect_b32 s8, s6, s8
+; GFX6-NEXT: s_sub_i32 s6, s6, s8
+; GFX6-NEXT: s_lshr_b32 s6, s6, 16
+; GFX6-NEXT: s_lshl_b32 s7, s7, 16
+; GFX6-NEXT: s_lshl_b32 s8, s15, 16
+; GFX6-NEXT: s_cmp_lt_u32 s7, s8
+; GFX6-NEXT: s_cselect_b32 s8, s7, s8
+; GFX6-NEXT: s_sub_i32 s7, s7, s8
+; GFX6-NEXT: s_mov_b32 s8, 0xffff
+; GFX6-NEXT: s_and_b32 s1, s1, s8
+; GFX6-NEXT: s_and_b32 s0, s0, s8
+; GFX6-NEXT: s_lshl_b32 s1, s1, 16
+; GFX6-NEXT: s_or_b32 s0, s0, s1
+; GFX6-NEXT: s_and_b32 s1, s2, s8
+; GFX6-NEXT: s_and_b32 s2, s3, s8
+; GFX6-NEXT: s_and_b32 s3, s5, s8
+; GFX6-NEXT: s_lshl_b32 s2, s2, 16
+; GFX6-NEXT: s_lshr_b32 s7, s7, 16
+; GFX6-NEXT: s_or_b32 s1, s1, s2
+; GFX6-NEXT: s_and_b32 s2, s4, s8
+; GFX6-NEXT: s_and_b32 s4, s7, s8
+; GFX6-NEXT: s_lshl_b32 s3, s3, 16
+; GFX6-NEXT: s_or_b32 s2, s2, s3
+; GFX6-NEXT: s_and_b32 s3, s6, s8
+; GFX6-NEXT: s_lshl_b32 s4, s4, 16
+; GFX6-NEXT: s_or_b32 s3, s3, s4
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v8i16:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_lshr_b32 s12, s4, 16
+; GFX8-NEXT: s_lshr_b32 s8, s0, 16
+; GFX8-NEXT: s_lshr_b32 s9, s1, 16
+; GFX8-NEXT: s_lshr_b32 s10, s2, 16
+; GFX8-NEXT: s_lshr_b32 s11, s3, 16
+; GFX8-NEXT: s_lshr_b32 s13, s5, 16
+; GFX8-NEXT: s_lshr_b32 s14, s6, 16
+; GFX8-NEXT: s_lshr_b32 s15, s7, 16
+; GFX8-NEXT: s_bfe_u32 s16, s0, 0x100000
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s16, s4
+; GFX8-NEXT: s_cselect_b32 s4, s16, s4
+; GFX8-NEXT: s_sub_i32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s4, s8, 0x100000
+; GFX8-NEXT: s_bfe_u32 s12, s12, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s4, s12
+; GFX8-NEXT: s_cselect_b32 s4, s4, s12
+; GFX8-NEXT: s_sub_i32 s4, s8, s4
+; GFX8-NEXT: s_bfe_u32 s8, s1, 0x100000
+; GFX8-NEXT: s_bfe_u32 s5, s5, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s8, s5
+; GFX8-NEXT: s_cselect_b32 s5, s8, s5
+; GFX8-NEXT: s_sub_i32 s1, s1, s5
+; GFX8-NEXT: s_bfe_u32 s5, s9, 0x100000
+; GFX8-NEXT: s_bfe_u32 s8, s13, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s5, s8
+; GFX8-NEXT: s_cselect_b32 s5, s5, s8
+; GFX8-NEXT: s_sub_i32 s5, s9, s5
+; GFX8-NEXT: s_bfe_u32 s8, s2, 0x100000
+; GFX8-NEXT: s_bfe_u32 s6, s6, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s8, s6
+; GFX8-NEXT: s_cselect_b32 s6, s8, s6
+; GFX8-NEXT: s_sub_i32 s2, s2, s6
+; GFX8-NEXT: s_bfe_u32 s6, s10, 0x100000
+; GFX8-NEXT: s_bfe_u32 s8, s14, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s6, s8
+; GFX8-NEXT: s_cselect_b32 s6, s6, s8
+; GFX8-NEXT: s_sub_i32 s6, s10, s6
+; GFX8-NEXT: s_bfe_u32 s8, s3, 0x100000
+; GFX8-NEXT: s_bfe_u32 s7, s7, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s8, s7
+; GFX8-NEXT: s_cselect_b32 s7, s8, s7
+; GFX8-NEXT: s_sub_i32 s3, s3, s7
+; GFX8-NEXT: s_bfe_u32 s7, s11, 0x100000
+; GFX8-NEXT: s_bfe_u32 s8, s15, 0x100000
+; GFX8-NEXT: s_cmp_lt_u32 s7, s8
+; GFX8-NEXT: s_cselect_b32 s7, s7, s8
+; GFX8-NEXT: s_bfe_u32 s4, s4, 0x100000
+; GFX8-NEXT: s_bfe_u32 s0, s0, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s0, s0, s4
+; GFX8-NEXT: s_bfe_u32 s4, s5, 0x100000
+; GFX8-NEXT: s_bfe_u32 s1, s1, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s1, s1, s4
+; GFX8-NEXT: s_bfe_u32 s4, s6, 0x100000
+; GFX8-NEXT: s_sub_i32 s7, s11, s7
+; GFX8-NEXT: s_bfe_u32 s2, s2, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s2, s2, s4
+; GFX8-NEXT: s_bfe_u32 s4, s7, 0x100000
+; GFX8-NEXT: s_bfe_u32 s3, s3, 0x100000
+; GFX8-NEXT: s_lshl_b32 s4, s4, 16
+; GFX8-NEXT: s_or_b32 s3, s3, s4
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v8i16:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_mov_b32 s9, 0xffff
+; GFX9-NEXT: s_lshr_b32 s11, s4, 16
+; GFX9-NEXT: s_lshr_b32 s8, s0, 16
+; GFX9-NEXT: s_and_b32 s10, s0, s9
+; GFX9-NEXT: s_and_b32 s4, s4, s9
+; GFX9-NEXT: s_cmp_lt_u32 s10, s4
+; GFX9-NEXT: s_cselect_b32 s4, s10, s4
+; GFX9-NEXT: s_cmp_lt_u32 s8, s11
+; GFX9-NEXT: s_cselect_b32 s10, s8, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s10
+; GFX9-NEXT: s_lshr_b32 s10, s4, 16
+; GFX9-NEXT: s_sub_i32 s0, s0, s4
+; GFX9-NEXT: s_sub_i32 s4, s8, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s0, s0, s4
+; GFX9-NEXT: s_lshr_b32 s10, s5, 16
+; GFX9-NEXT: s_lshr_b32 s4, s1, 16
+; GFX9-NEXT: s_and_b32 s8, s1, s9
+; GFX9-NEXT: s_and_b32 s5, s5, s9
+; GFX9-NEXT: s_cmp_lt_u32 s8, s5
+; GFX9-NEXT: s_cselect_b32 s5, s8, s5
+; GFX9-NEXT: s_cmp_lt_u32 s4, s10
+; GFX9-NEXT: s_cselect_b32 s8, s4, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s8
+; GFX9-NEXT: s_lshr_b32 s8, s5, 16
+; GFX9-NEXT: s_sub_i32 s1, s1, s5
+; GFX9-NEXT: s_sub_i32 s4, s4, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s1, s1, s4
+; GFX9-NEXT: s_lshr_b32 s8, s6, 16
+; GFX9-NEXT: s_lshr_b32 s4, s2, 16
+; GFX9-NEXT: s_and_b32 s5, s2, s9
+; GFX9-NEXT: s_and_b32 s6, s6, s9
+; GFX9-NEXT: s_cmp_lt_u32 s5, s6
+; GFX9-NEXT: s_cselect_b32 s5, s5, s6
+; GFX9-NEXT: s_cmp_lt_u32 s4, s8
+; GFX9-NEXT: s_cselect_b32 s6, s4, s8
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX9-NEXT: s_lshr_b32 s6, s5, 16
+; GFX9-NEXT: s_sub_i32 s2, s2, s5
+; GFX9-NEXT: s_sub_i32 s4, s4, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s2, s2, s4
+; GFX9-NEXT: s_lshr_b32 s6, s7, 16
+; GFX9-NEXT: s_lshr_b32 s4, s3, 16
+; GFX9-NEXT: s_and_b32 s5, s3, s9
+; GFX9-NEXT: s_and_b32 s7, s7, s9
+; GFX9-NEXT: s_cmp_lt_u32 s5, s7
+; GFX9-NEXT: s_cselect_b32 s5, s5, s7
+; GFX9-NEXT: s_cmp_lt_u32 s4, s6
+; GFX9-NEXT: s_cselect_b32 s6, s4, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6
+; GFX9-NEXT: s_lshr_b32 s6, s5, 16
+; GFX9-NEXT: s_sub_i32 s3, s3, s5
+; GFX9-NEXT: s_sub_i32 s4, s4, s6
+; GFX9-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v8i16:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_mov_b32 s8, 0xffff
+; GFX10-NEXT: s_lshr_b32 s9, s0, 16
+; GFX10-NEXT: s_and_b32 s11, s4, s8
+; GFX10-NEXT: s_and_b32 s10, s0, s8
+; GFX10-NEXT: s_lshr_b32 s4, s4, 16
+; GFX10-NEXT: s_cmp_lt_u32 s10, s11
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cselect_b32 s10, s10, s11
+; GFX10-NEXT: s_cmp_lt_u32 s9, s4
+; GFX10-NEXT: s_cselect_b32 s4, s9, s4
+; GFX10-NEXT: s_and_b32 s11, s5, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s10, s4
+; GFX10-NEXT: s_lshr_b32 s5, s5, 16
+; GFX10-NEXT: s_lshr_b32 s10, s4, 16
+; GFX10-NEXT: s_sub_i32 s0, s0, s4
+; GFX10-NEXT: s_sub_i32 s4, s9, s10
+; GFX10-NEXT: s_and_b32 s10, s1, s8
+; GFX10-NEXT: s_lshr_b32 s9, s1, 16
+; GFX10-NEXT: s_cmp_lt_u32 s10, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s0, s0, s4
+; GFX10-NEXT: s_cselect_b32 s10, s10, s11
+; GFX10-NEXT: s_cmp_lt_u32 s9, s5
+; GFX10-NEXT: s_cselect_b32 s5, s9, s5
+; GFX10-NEXT: s_and_b32 s11, s6, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s5, s10, s5
+; GFX10-NEXT: s_lshr_b32 s6, s6, 16
+; GFX10-NEXT: s_lshr_b32 s10, s5, 16
+; GFX10-NEXT: s_sub_i32 s1, s1, s5
+; GFX10-NEXT: s_sub_i32 s5, s9, s10
+; GFX10-NEXT: s_and_b32 s10, s2, s8
+; GFX10-NEXT: s_lshr_b32 s9, s2, 16
+; GFX10-NEXT: s_cmp_lt_u32 s10, s11
+; GFX10-NEXT: s_pack_ll_b32_b16 s1, s1, s5
+; GFX10-NEXT: s_cselect_b32 s10, s10, s11
+; GFX10-NEXT: s_cmp_lt_u32 s9, s6
+; GFX10-NEXT: s_cselect_b32 s6, s9, s6
+; GFX10-NEXT: s_pack_ll_b32_b16 s6, s10, s6
+; GFX10-NEXT: s_lshr_b32 s10, s6, 16
+; GFX10-NEXT: s_sub_i32 s2, s2, s6
+; GFX10-NEXT: s_sub_i32 s6, s9, s10
+; GFX10-NEXT: s_and_b32 s10, s3, s8
+; GFX10-NEXT: s_and_b32 s8, s7, s8
+; GFX10-NEXT: s_lshr_b32 s9, s3, 16
+; GFX10-NEXT: s_lshr_b32 s7, s7, 16
+; GFX10-NEXT: s_cmp_lt_u32 s10, s8
+; GFX10-NEXT: s_pack_ll_b32_b16 s2, s2, s6
+; GFX10-NEXT: s_cselect_b32 s8, s10, s8
+; GFX10-NEXT: s_cmp_lt_u32 s9, s7
+; GFX10-NEXT: s_cselect_b32 s7, s9, s7
+; GFX10-NEXT: s_pack_ll_b32_b16 s4, s8, s7
+; GFX10-NEXT: s_lshr_b32 s5, s4, 16
+; GFX10-NEXT: s_sub_i32 s3, s3, s4
+; GFX10-NEXT: s_sub_i32 s4, s9, s5
+; GFX10-NEXT: s_pack_ll_b32_b16 s3, s3, s4
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <8 x i16> @llvm.usub.sat.v8i16(<8 x i16> %lhs, <8 x i16> %rhs)
+ %cast = bitcast <8 x i16> %result to <4 x i32>
+ ret <4 x i32> %cast
+}
+
+; FIXME: i48 is broken because i48 add is broken
+; define i48 @v_usubsat_i48(i48 %lhs, i48 %rhs) {
+; %result = call i48 @llvm.usub.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps i48 @s_usubsat_i48(i48 inreg %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.usub.sat.i48(i48 %lhs, i48 %rhs)
+; ret i48 %result
+; }
+
+; define amdgpu_ps <2 x float> @usubsat_i48_sv(i48 inreg %lhs, i48 %rhs) {
+; %result = call i48 @llvm.usub.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+; define amdgpu_ps <2 x float> @usubsat_i48_vs(i48 %lhs, i48 inreg %rhs) {
+; %result = call i48 @llvm.usub.sat.i48(i48 %lhs, i48 %rhs)
+; %ext.result = zext i48 %result to i64
+; %cast = bitcast i64 %ext.result to <2 x float>
+; ret <2 x float> %cast
+; }
+
+define i64 @v_usubsat_i64(i64 %lhs, i64 %rhs) {
+; GFX6-LABEL: v_usubsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v0, v2
+; GFX6-NEXT: v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, 0, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v0, v2
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v1, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, 0, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v0, v2
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v1, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, 0, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_sub_co_u32_e64 v4, vcc_lo, v0, v2
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v5, vcc_lo, v1, v3, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v5, 0, vcc_lo
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
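+
+; For i64 the expansion goes through subtract-with-overflow rather than
+; min/sub; roughly (illustrative names, not checked by FileCheck):
+;   %s = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 %lhs, i64 %rhs)
+;   %v = extractvalue { i64, i1 } %s, 0
+;   %o = extractvalue { i64, i1 } %s, 1
+;   %r = select i1 %o, i64 0, i64 %v
+; which is what the sub/subb + v_cmp_lt_u64 + v_cndmask sequences above
+; implement: the borrow condition lhs < rhs selects a zero result.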
+
+define amdgpu_ps i64 @s_usubsat_i64(i64 inreg %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: s_usubsat_i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s4, s0, s2
+; GFX6-NEXT: s_cselect_b32 s5, 1, 0
+; GFX6-NEXT: s_and_b32 s5, s5, 1
+; GFX6-NEXT: v_mov_b32_e32 v0, s2
+; GFX6-NEXT: s_cmp_lg_u32 s5, 0
+; GFX6-NEXT: v_mov_b32_e32 v1, s3
+; GFX6-NEXT: s_subb_u32 s5, s1, s3
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v2, s4
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s4, s0, s2
+; GFX8-NEXT: s_cselect_b32 s5, 1, 0
+; GFX8-NEXT: s_and_b32 s5, s5, 1
+; GFX8-NEXT: v_mov_b32_e32 v0, s2
+; GFX8-NEXT: s_cmp_lg_u32 s5, 0
+; GFX8-NEXT: v_mov_b32_e32 v1, s3
+; GFX8-NEXT: s_subb_u32 s5, s1, s3
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s4, s0, s2
+; GFX9-NEXT: s_cselect_b32 s5, 1, 0
+; GFX9-NEXT: s_and_b32 s5, s5, 1
+; GFX9-NEXT: v_mov_b32_e32 v0, s2
+; GFX9-NEXT: s_cmp_lg_u32 s5, 0
+; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: s_subb_u32 s5, s1, s3
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s4, s0, s2
+; GFX10-NEXT: s_cselect_b32 s5, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[2:3]
+; GFX10-NEXT: s_and_b32 s5, s5, 1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lg_u32 s5, 0
+; GFX10-NEXT: s_subb_u32 s1, s1, s3
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s4, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s1, 0, s0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
+ ret i64 %result
+}
+
+define amdgpu_ps <2 x float> @usubsat_i64_sv(i64 inreg %lhs, i64 %rhs) {
+; GFX6-LABEL: usubsat_i64_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_sub_i32_e32 v3, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v2, vcc, v2, v1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i64_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_sub_u32_e32 v3, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v2, vcc, v2, v1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i64_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_sub_co_u32_e32 v3, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v2, v1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i64_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_sub_co_u32_e64 v2, vcc_lo, s0, v0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[0:1], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define amdgpu_ps <2 x float> @usubsat_i64_vs(i64 %lhs, i64 inreg %rhs) {
+; GFX6-LABEL: usubsat_i64_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_subrev_i32_e32 v3, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v2, vcc, v1, v2, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i64_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_subrev_u32_e32 v3, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v2, vcc, v1, v2, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i64_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_subrev_co_u32_e32 v3, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v2, vcc, v1, v2, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v3, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i64_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_sub_co_u32_e64 v2, vcc_lo, v0, s0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v3, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[0:1], v[0:1]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v2, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v3, 0, vcc_lo
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i64 @llvm.usub.sat.i64(i64 %lhs, i64 %rhs)
+ %cast = bitcast i64 %result to <2 x float>
+ ret <2 x float> %cast
+}
+
+define <2 x i64> @v_usubsat_v2i64(<2 x i64> %lhs, <2 x i64> %rhs) {
+; GFX6-LABEL: v_usubsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v0, v4
+; GFX6-NEXT: v_subb_u32_e32 v9, vcc, v1, v5, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
+; GFX6-NEXT: v_sub_i32_e32 v4, vcc, v2, v6
+; GFX6-NEXT: v_subb_u32_e32 v5, vcc, v3, v7, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v4, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v5, 0, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, v0, v4
+; GFX8-NEXT: v_subb_u32_e32 v9, vcc, v1, v5, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
+; GFX8-NEXT: v_sub_u32_e32 v4, vcc, v2, v6
+; GFX8-NEXT: v_subb_u32_e32 v5, vcc, v3, v7, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v5, 0, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v0, v4
+; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v1, v5, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[4:5]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v4, vcc, v2, v6
+; GFX9-NEXT: v_subb_co_u32_e32 v5, vcc, v3, v7, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[6:7]
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v5, 0, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v10, v0
+; GFX10-NEXT: v_mov_b32_e32 v11, v1
+; GFX10-NEXT: v_mov_b32_e32 v0, v2
+; GFX10-NEXT: v_mov_b32_e32 v1, v3
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_sub_co_u32_e64 v8, vcc_lo, v10, v4
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v9, vcc_lo, v11, v5, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[10:11], v[4:5]
+; GFX10-NEXT: v_sub_co_u32_e64 v4, s4, v0, v6
+; GFX10-NEXT: v_sub_co_ci_u32_e64 v5, s4, v1, v7, s4
+; GFX10-NEXT: v_cmp_lt_u64_e64 s4, v[0:1], v[6:7]
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v9, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v4, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v5, 0, s4
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
+
+define amdgpu_ps <2 x i64> @s_usubsat_v2i64(<2 x i64> inreg %lhs, <2 x i64> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v2i64:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s8, s0, s4
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: s_and_b32 s9, s9, 1
+; GFX6-NEXT: s_cmp_lg_u32 s9, 0
+; GFX6-NEXT: v_mov_b32_e32 v0, s4
+; GFX6-NEXT: s_subb_u32 s9, s1, s5
+; GFX6-NEXT: v_mov_b32_e32 v1, s5
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: s_sub_u32 s0, s2, s6
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: v_mov_b32_e32 v0, s6
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: v_mov_b32_e32 v2, s8
+; GFX6-NEXT: v_mov_b32_e32 v3, s9
+; GFX6-NEXT: v_mov_b32_e32 v1, s7
+; GFX6-NEXT: s_subb_u32 s1, s3, s7
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: v_mov_b32_e32 v4, s0
+; GFX6-NEXT: v_mov_b32_e32 v5, s1
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v5, 0, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v2
+; GFX6-NEXT: v_readfirstlane_b32 s1, v3
+; GFX6-NEXT: v_readfirstlane_b32 s2, v0
+; GFX6-NEXT: v_readfirstlane_b32 s3, v1
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v2i64:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s8, s0, s4
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: s_and_b32 s9, s9, 1
+; GFX8-NEXT: s_cmp_lg_u32 s9, 0
+; GFX8-NEXT: v_mov_b32_e32 v0, s4
+; GFX8-NEXT: s_subb_u32 s9, s1, s5
+; GFX8-NEXT: v_mov_b32_e32 v1, s5
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: s_sub_u32 s0, s2, s6
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NEXT: v_mov_b32_e32 v3, s9
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: s_subb_u32 s1, s3, s7
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: v_mov_b32_e32 v4, s0
+; GFX8-NEXT: v_mov_b32_e32 v5, s1
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v5, 0, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v2
+; GFX8-NEXT: v_readfirstlane_b32 s1, v3
+; GFX8-NEXT: v_readfirstlane_b32 s2, v0
+; GFX8-NEXT: v_readfirstlane_b32 s3, v1
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v2i64:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s8, s0, s4
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: s_and_b32 s9, s9, 1
+; GFX9-NEXT: s_cmp_lg_u32 s9, 0
+; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: s_subb_u32 s9, s1, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, s5
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: s_sub_u32 s0, s2, s6
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: s_subb_u32 s1, s3, s7
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, s0
+; GFX9-NEXT: v_mov_b32_e32 v5, s1
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v5, 0, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v2
+; GFX9-NEXT: v_readfirstlane_b32 s1, v3
+; GFX9-NEXT: v_readfirstlane_b32 s2, v0
+; GFX9-NEXT: v_readfirstlane_b32 s3, v1
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v2i64:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s8, s0, s4
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_subb_u32 s9, s1, s5
+; GFX10-NEXT: v_cmp_lt_u64_e64 s1, s[0:1], s[4:5]
+; GFX10-NEXT: s_sub_u32 s0, s2, s6
+; GFX10-NEXT: s_cselect_b32 s4, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[2:3], s[6:7]
+; GFX10-NEXT: s_and_b32 s4, s4, 1
+; GFX10-NEXT: s_cmp_lg_u32 s4, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s8, 0, s1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s9, 0, s1
+; GFX10-NEXT: s_subb_u32 s1, s3, s7
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s0, 0, s2
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s1, 0, s2
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i64> @llvm.usub.sat.v2i64(<2 x i64> %lhs, <2 x i64> %rhs)
+ ret <2 x i64> %result
+}
+
+define amdgpu_ps i128 @s_usubsat_i128(i128 inreg %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: s_usubsat_i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s8, s0, s4
+; GFX6-NEXT: s_cselect_b32 s9, 1, 0
+; GFX6-NEXT: s_and_b32 s9, s9, 1
+; GFX6-NEXT: v_mov_b32_e32 v2, s4
+; GFX6-NEXT: s_cmp_lg_u32 s9, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, s5
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: s_subb_u32 s9, s1, s5
+; GFX6-NEXT: v_mov_b32_e32 v0, s6
+; GFX6-NEXT: s_cselect_b32 s10, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v1, s7
+; GFX6-NEXT: s_and_b32 s10, s10, 1
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: s_cmp_lg_u32 s10, 0
+; GFX6-NEXT: s_subb_u32 s10, s2, s6
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: s_cselect_b32 s11, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: s_and_b32 s11, s11, 1
+; GFX6-NEXT: s_cmp_lg_u32 s11, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_subb_u32 s11, s3, s7
+; GFX6-NEXT: v_mov_b32_e32 v1, s8
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v2, s9
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s10
+; GFX6-NEXT: v_mov_b32_e32 v3, s11
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v0
+; GFX6-NEXT: v_readfirstlane_b32 s1, v1
+; GFX6-NEXT: v_readfirstlane_b32 s2, v2
+; GFX6-NEXT: v_readfirstlane_b32 s3, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s8, s0, s4
+; GFX8-NEXT: s_cselect_b32 s9, 1, 0
+; GFX8-NEXT: s_and_b32 s9, s9, 1
+; GFX8-NEXT: s_cmp_lg_u32 s9, 0
+; GFX8-NEXT: s_subb_u32 s9, s1, s5
+; GFX8-NEXT: s_cselect_b32 s10, 1, 0
+; GFX8-NEXT: s_and_b32 s10, s10, 1
+; GFX8-NEXT: s_cmp_lg_u32 s10, 0
+; GFX8-NEXT: s_subb_u32 s10, s2, s6
+; GFX8-NEXT: s_cselect_b32 s11, 1, 0
+; GFX8-NEXT: s_and_b32 s11, s11, 1
+; GFX8-NEXT: v_mov_b32_e32 v2, s4
+; GFX8-NEXT: s_cmp_lg_u32 s11, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, s5
+; GFX8-NEXT: s_subb_u32 s11, s3, s7
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s6
+; GFX8-NEXT: v_mov_b32_e32 v1, s7
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX8-NEXT: s_cselect_b32 s6, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s6
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_mov_b32_e32 v1, s8
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s9
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s10
+; GFX8-NEXT: v_mov_b32_e32 v3, s11
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v0
+; GFX8-NEXT: v_readfirstlane_b32 s1, v1
+; GFX8-NEXT: v_readfirstlane_b32 s2, v2
+; GFX8-NEXT: v_readfirstlane_b32 s3, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s8, s0, s4
+; GFX9-NEXT: s_cselect_b32 s9, 1, 0
+; GFX9-NEXT: s_and_b32 s9, s9, 1
+; GFX9-NEXT: s_cmp_lg_u32 s9, 0
+; GFX9-NEXT: s_subb_u32 s9, s1, s5
+; GFX9-NEXT: s_cselect_b32 s10, 1, 0
+; GFX9-NEXT: s_and_b32 s10, s10, 1
+; GFX9-NEXT: s_cmp_lg_u32 s10, 0
+; GFX9-NEXT: s_subb_u32 s10, s2, s6
+; GFX9-NEXT: s_cselect_b32 s11, 1, 0
+; GFX9-NEXT: s_and_b32 s11, s11, 1
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: s_cmp_lg_u32 s11, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
+; GFX9-NEXT: s_subb_u32 s11, s3, s7
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s6
+; GFX9-NEXT: v_mov_b32_e32 v1, s7
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX9-NEXT: s_cselect_b32 s6, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s6
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, s8
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, s9
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s10
+; GFX9-NEXT: v_mov_b32_e32 v3, s11
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v0
+; GFX9-NEXT: v_readfirstlane_b32 s1, v1
+; GFX9-NEXT: v_readfirstlane_b32 s2, v2
+; GFX9-NEXT: v_readfirstlane_b32 s3, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s8, s0, s4
+; GFX10-NEXT: s_cselect_b32 s9, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[4:5]
+; GFX10-NEXT: s_and_b32 s9, s9, 1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lg_u32 s9, 0
+; GFX10-NEXT: s_subb_u32 s9, s1, s5
+; GFX10-NEXT: s_cselect_b32 s10, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s10, s10, 1
+; GFX10-NEXT: s_cmp_lg_u32 s10, 0
+; GFX10-NEXT: s_subb_u32 s14, s2, s6
+; GFX10-NEXT: s_cselect_b32 s11, 1, 0
+; GFX10-NEXT: s_and_b32 s11, s11, 1
+; GFX10-NEXT: s_cmp_lg_u32 s11, 0
+; GFX10-NEXT: s_subb_u32 s1, s3, s7
+; GFX10-NEXT: s_cmp_eq_u64 s[2:3], s[6:7]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[2:3], s[6:7]
+; GFX10-NEXT: s_cselect_b32 s0, 1, 0
+; GFX10-NEXT: s_and_b32 s0, 1, s0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s14, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s1, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s9, 0, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s2, v2
+; GFX10-NEXT: v_readfirstlane_b32 s3, v3
+; GFX10-NEXT: v_readfirstlane_b32 s1, v1
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.usub.sat.i128(i128 %lhs, i128 %rhs)
+ ret i128 %result
+}
+
+define amdgpu_ps <4 x float> @usubsat_i128_sv(i128 inreg %lhs, i128 %rhs) {
+; GFX6-LABEL: usubsat_i128_sv:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, s1
+; GFX6-NEXT: v_sub_i32_e32 v5, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v4, vcc, v4, v1, vcc
+; GFX6-NEXT: v_mov_b32_e32 v6, s2
+; GFX6-NEXT: v_mov_b32_e32 v7, s3
+; GFX6-NEXT: v_subb_u32_e32 v6, vcc, v6, v2, vcc
+; GFX6-NEXT: v_subb_u32_e32 v7, vcc, v7, v3, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i128_sv:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, s1
+; GFX8-NEXT: v_sub_u32_e32 v5, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v4, vcc, v4, v1, vcc
+; GFX8-NEXT: v_mov_b32_e32 v6, s2
+; GFX8-NEXT: v_mov_b32_e32 v7, s3
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v6, v2, vcc
+; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v7, v3, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i128_sv:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v4, s1
+; GFX9-NEXT: v_sub_co_u32_e32 v5, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v4, v1, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, s2
+; GFX9-NEXT: v_mov_b32_e32 v7, s3
+; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v6, v2, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v7, v3, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i128_sv:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[0:1], v[0:1]
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, s[2:3], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[2:3], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc_lo
+; GFX10-NEXT: v_sub_co_u32_e64 v0, vcc_lo, s0, v0
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v2, vcc_lo, s2, v2, vcc_lo
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v4
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0, s0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.usub.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define amdgpu_ps <4 x float> @usubsat_i128_vs(i128 %lhs, i128 inreg %rhs) {
+; GFX6-LABEL: usubsat_i128_vs:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: v_mov_b32_e32 v4, s1
+; GFX6-NEXT: v_subrev_i32_e32 v5, vcc, s0, v0
+; GFX6-NEXT: v_subb_u32_e32 v4, vcc, v1, v4, vcc
+; GFX6-NEXT: v_mov_b32_e32 v6, s2
+; GFX6-NEXT: v_mov_b32_e32 v7, s3
+; GFX6-NEXT: v_subb_u32_e32 v6, vcc, v2, v6, vcc
+; GFX6-NEXT: v_subb_u32_e32 v7, vcc, v3, v7, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: usubsat_i128_vs:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: v_mov_b32_e32 v4, s1
+; GFX8-NEXT: v_subrev_u32_e32 v5, vcc, s0, v0
+; GFX8-NEXT: v_subb_u32_e32 v4, vcc, v1, v4, vcc
+; GFX8-NEXT: v_mov_b32_e32 v6, s2
+; GFX8-NEXT: v_mov_b32_e32 v7, s3
+; GFX8-NEXT: v_subb_u32_e32 v6, vcc, v2, v6, vcc
+; GFX8-NEXT: v_subb_u32_e32 v7, vcc, v3, v7, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: usubsat_i128_vs:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: v_mov_b32_e32 v4, s1
+; GFX9-NEXT: v_subrev_co_u32_e32 v5, vcc, s0, v0
+; GFX9-NEXT: v_subb_co_u32_e32 v4, vcc, v1, v4, vcc
+; GFX9-NEXT: v_mov_b32_e32 v6, s2
+; GFX9-NEXT: v_mov_b32_e32 v7, s3
+; GFX9-NEXT: v_subb_co_u32_e32 v6, vcc, v2, v6, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v7, vcc, v3, v7, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[0:1], v[0:1]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_gt_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[2:3]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v5, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v4, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v6, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v7, 0, vcc
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: usubsat_i128_vs:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[0:1], v[0:1]
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_gt_u64_e32 vcc_lo, s[2:3], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, s[2:3], v[2:3]
+; GFX10-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc_lo
+; GFX10-NEXT: v_sub_co_u32_e64 v0, vcc_lo, v0, s0
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v1, vcc_lo, s1, v1, vcc_lo
+; GFX10-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v2, vcc_lo, s2, v2, vcc_lo
+; GFX10-NEXT: v_subrev_co_ci_u32_e32 v3, vcc_lo, s3, v3, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e64 s0, 0, v4
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0, s0
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call i128 @llvm.usub.sat.i128(i128 %lhs, i128 %rhs)
+ %cast = bitcast i128 %result to <4 x float>
+ ret <4 x float> %cast
+}
+
+define <2 x i128> @v_usubsat_v2i128(<2 x i128> %lhs, <2 x i128> %rhs) {
+; GFX6-LABEL: v_usubsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX6-NEXT: v_sub_i32_e32 v16, vcc, v0, v8
+; GFX6-NEXT: v_subb_u32_e32 v17, vcc, v1, v9, vcc
+; GFX6-NEXT: v_subb_u32_e32 v18, vcc, v2, v10, vcc
+; GFX6-NEXT: v_subb_u32_e32 v19, vcc, v3, v11, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX6-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX6-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[10:11]
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v16, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v17, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v18, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v19, 0, vcc
+; GFX6-NEXT: v_sub_i32_e32 v8, vcc, v4, v12
+; GFX6-NEXT: v_subb_u32_e32 v9, vcc, v5, v13, vcc
+; GFX6-NEXT: v_subb_u32_e32 v10, vcc, v6, v14, vcc
+; GFX6-NEXT: v_subb_u32_e32 v11, vcc, v7, v15, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX6-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX6-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[14:15]
+; GFX6-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX6-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v8, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v9, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v10, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v11, 0, vcc
+; GFX6-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX8-LABEL: v_usubsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX8-NEXT: v_sub_u32_e32 v16, vcc, v0, v8
+; GFX8-NEXT: v_subb_u32_e32 v17, vcc, v1, v9, vcc
+; GFX8-NEXT: v_subb_u32_e32 v18, vcc, v2, v10, vcc
+; GFX8-NEXT: v_subb_u32_e32 v19, vcc, v3, v11, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[10:11]
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v16, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v17, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v18, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v19, 0, vcc
+; GFX8-NEXT: v_sub_u32_e32 v8, vcc, v4, v12
+; GFX8-NEXT: v_subb_u32_e32 v9, vcc, v5, v13, vcc
+; GFX8-NEXT: v_subb_u32_e32 v10, vcc, v6, v14, vcc
+; GFX8-NEXT: v_subb_u32_e32 v11, vcc, v7, v15, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX8-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX8-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[14:15]
+; GFX8-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX8-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v8, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v9, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v10, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v11, 0, vcc
+; GFX8-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX9-LABEL: v_usubsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_sub_co_u32_e32 v16, vcc, v0, v8
+; GFX9-NEXT: v_subb_co_u32_e32 v17, vcc, v1, v9, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v18, vcc, v2, v10, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v19, vcc, v3, v11, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[0:1], v[8:9]
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e64 v1, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[10:11]
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v16, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v17, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v18, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v19, 0, vcc
+; GFX9-NEXT: v_sub_co_u32_e32 v8, vcc, v4, v12
+; GFX9-NEXT: v_subb_co_u32_e32 v9, vcc, v5, v13, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v10, vcc, v6, v14, vcc
+; GFX9-NEXT: v_subb_co_u32_e32 v11, vcc, v7, v15, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[4:5], v[12:13]
+; GFX9-NEXT: v_cndmask_b32_e64 v4, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: v_cndmask_b32_e64 v5, 0, 1, vcc
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[14:15]
+; GFX9-NEXT: v_cndmask_b32_e32 v4, v5, v4, vcc
+; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v8, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v9, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v10, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v11, 0, vcc
+; GFX9-NEXT: s_setpc_b64 s[30:31]
+;
+; GFX10-LABEL: v_usubsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX10-NEXT: s_waitcnt_vscnt null, 0x0
+; GFX10-NEXT: v_mov_b32_e32 v22, v0
+; GFX10-NEXT: v_mov_b32_e32 v23, v1
+; GFX10-NEXT: v_mov_b32_e32 v20, v2
+; GFX10-NEXT: v_mov_b32_e32 v21, v3
+; GFX10-NEXT: v_mov_b32_e32 v26, v4
+; GFX10-NEXT: v_mov_b32_e32 v27, v5
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[22:23], v[8:9]
+; GFX10-NEXT: v_mov_b32_e32 v24, v6
+; GFX10-NEXT: v_mov_b32_e32 v25, v7
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: v_cndmask_b32_e64 v16, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[20:21], v[10:11]
+; GFX10-NEXT: v_cmp_eq_u64_e64 s5, v[24:25], v[14:15]
+; GFX10-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[20:21], v[10:11]
+; GFX10-NEXT: v_cndmask_b32_e32 v16, v17, v16, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[26:27], v[12:13]
+; GFX10-NEXT: v_and_b32_e32 v16, 1, v16
+; GFX10-NEXT: v_cndmask_b32_e64 v17, 0, 1, vcc_lo
+; GFX10-NEXT: v_cmp_lt_u64_e32 vcc_lo, v[24:25], v[14:15]
+; GFX10-NEXT: v_cmp_ne_u32_e64 s4, 0, v16
+; GFX10-NEXT: v_cndmask_b32_e64 v18, 0, 1, vcc_lo
+; GFX10-NEXT: v_sub_co_u32_e64 v0, vcc_lo, v22, v8
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v1, vcc_lo, v23, v9, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v8, v18, v17, s5
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v2, vcc_lo, v20, v10, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, v0, 0, s4
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v3, vcc_lo, v21, v11, vcc_lo
+; GFX10-NEXT: v_sub_co_u32_e64 v4, vcc_lo, v26, v12
+; GFX10-NEXT: v_and_b32_e32 v8, 1, v8
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v5, vcc_lo, v27, v13, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v1, v1, 0, s4
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v6, vcc_lo, v24, v14, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, v2, 0, s4
+; GFX10-NEXT: v_cmp_ne_u32_e64 s5, 0, v8
+; GFX10-NEXT: v_sub_co_ci_u32_e32 v7, vcc_lo, v25, v15, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, v3, 0, s4
+; GFX10-NEXT: v_cndmask_b32_e64 v4, v4, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v5, v5, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v6, v6, 0, s5
+; GFX10-NEXT: v_cndmask_b32_e64 v7, v7, 0, s5
+; GFX10-NEXT: s_setpc_b64 s[30:31]
+ %result = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
+
+define amdgpu_ps <2 x i128> @s_usubsat_v2i128(<2 x i128> inreg %lhs, <2 x i128> inreg %rhs) {
+; GFX6-LABEL: s_usubsat_v2i128:
+; GFX6: ; %bb.0:
+; GFX6-NEXT: s_sub_u32 s16, s0, s8
+; GFX6-NEXT: s_cselect_b32 s17, 1, 0
+; GFX6-NEXT: s_and_b32 s17, s17, 1
+; GFX6-NEXT: s_cmp_lg_u32 s17, 0
+; GFX6-NEXT: s_subb_u32 s17, s1, s9
+; GFX6-NEXT: v_mov_b32_e32 v2, s8
+; GFX6-NEXT: s_cselect_b32 s18, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v3, s9
+; GFX6-NEXT: s_and_b32 s18, s18, 1
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s10
+; GFX6-NEXT: s_cmp_lg_u32 s18, 0
+; GFX6-NEXT: v_mov_b32_e32 v1, s11
+; GFX6-NEXT: s_subb_u32 s18, s2, s10
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: s_cselect_b32 s19, 1, 0
+; GFX6-NEXT: s_and_b32 s19, s19, 1
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[2:3], v[0:1]
+; GFX6-NEXT: s_cmp_lg_u32 s19, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: s_subb_u32 s19, s3, s11
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_sub_u32 s0, s4, s12
+; GFX6-NEXT: s_cselect_b32 s1, 1, 0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v2, s17
+; GFX6-NEXT: s_and_b32 s1, s1, 1
+; GFX6-NEXT: v_mov_b32_e32 v1, s16
+; GFX6-NEXT: v_cndmask_b32_e64 v5, v2, 0, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s12
+; GFX6-NEXT: s_cmp_lg_u32 s1, 0
+; GFX6-NEXT: v_cndmask_b32_e64 v4, v1, 0, vcc
+; GFX6-NEXT: v_mov_b32_e32 v0, s18
+; GFX6-NEXT: v_mov_b32_e32 v1, s19
+; GFX6-NEXT: v_mov_b32_e32 v3, s13
+; GFX6-NEXT: s_subb_u32 s1, s5, s13
+; GFX6-NEXT: v_cndmask_b32_e64 v6, v0, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v7, v1, 0, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GFX6-NEXT: v_mov_b32_e32 v0, s14
+; GFX6-NEXT: s_cselect_b32 s2, 1, 0
+; GFX6-NEXT: v_mov_b32_e32 v1, s15
+; GFX6-NEXT: s_and_b32 s2, s2, 1
+; GFX6-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX6-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GFX6-NEXT: s_cmp_lg_u32 s2, 0
+; GFX6-NEXT: s_subb_u32 s2, s6, s14
+; GFX6-NEXT: v_cndmask_b32_e64 v3, 0, 1, vcc
+; GFX6-NEXT: v_cmp_eq_u64_e32 vcc, s[6:7], v[0:1]
+; GFX6-NEXT: s_cselect_b32 s3, 1, 0
+; GFX6-NEXT: v_cndmask_b32_e32 v0, v3, v2, vcc
+; GFX6-NEXT: s_and_b32 s3, s3, 1
+; GFX6-NEXT: s_cmp_lg_u32 s3, 0
+; GFX6-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX6-NEXT: s_subb_u32 s3, s7, s15
+; GFX6-NEXT: v_mov_b32_e32 v1, s0
+; GFX6-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX6-NEXT: v_mov_b32_e32 v2, s1
+; GFX6-NEXT: v_mov_b32_e32 v3, s3
+; GFX6-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX6-NEXT: v_mov_b32_e32 v2, s2
+; GFX6-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX6-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX6-NEXT: v_readfirstlane_b32 s0, v4
+; GFX6-NEXT: v_readfirstlane_b32 s1, v5
+; GFX6-NEXT: v_readfirstlane_b32 s2, v6
+; GFX6-NEXT: v_readfirstlane_b32 s3, v7
+; GFX6-NEXT: v_readfirstlane_b32 s4, v0
+; GFX6-NEXT: v_readfirstlane_b32 s5, v1
+; GFX6-NEXT: v_readfirstlane_b32 s6, v2
+; GFX6-NEXT: v_readfirstlane_b32 s7, v3
+; GFX6-NEXT: ; return to shader part epilog
+;
+; GFX8-LABEL: s_usubsat_v2i128:
+; GFX8: ; %bb.0:
+; GFX8-NEXT: s_sub_u32 s16, s0, s8
+; GFX8-NEXT: s_cselect_b32 s17, 1, 0
+; GFX8-NEXT: s_and_b32 s17, s17, 1
+; GFX8-NEXT: s_cmp_lg_u32 s17, 0
+; GFX8-NEXT: s_subb_u32 s17, s1, s9
+; GFX8-NEXT: s_cselect_b32 s18, 1, 0
+; GFX8-NEXT: s_and_b32 s18, s18, 1
+; GFX8-NEXT: s_cmp_lg_u32 s18, 0
+; GFX8-NEXT: s_subb_u32 s18, s2, s10
+; GFX8-NEXT: s_cselect_b32 s19, 1, 0
+; GFX8-NEXT: s_and_b32 s19, s19, 1
+; GFX8-NEXT: v_mov_b32_e32 v2, s8
+; GFX8-NEXT: s_cmp_lg_u32 s19, 0
+; GFX8-NEXT: v_mov_b32_e32 v3, s9
+; GFX8-NEXT: s_subb_u32 s19, s3, s11
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s10
+; GFX8-NEXT: v_mov_b32_e32 v1, s11
+; GFX8-NEXT: s_cmp_eq_u64 s[2:3], s[10:11]
+; GFX8-NEXT: s_cselect_b32 s10, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX8-NEXT: s_and_b32 s0, 1, s10
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX8-NEXT: s_sub_u32 s0, s4, s12
+; GFX8-NEXT: s_cselect_b32 s1, 1, 0
+; GFX8-NEXT: s_and_b32 s1, s1, 1
+; GFX8-NEXT: s_cmp_lg_u32 s1, 0
+; GFX8-NEXT: s_subb_u32 s1, s5, s13
+; GFX8-NEXT: s_cselect_b32 s2, 1, 0
+; GFX8-NEXT: s_and_b32 s2, s2, 1
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: s_cmp_lg_u32 s2, 0
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: s_subb_u32 s2, s6, s14
+; GFX8-NEXT: s_cselect_b32 s3, 1, 0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s17
+; GFX8-NEXT: s_and_b32 s3, s3, 1
+; GFX8-NEXT: v_mov_b32_e32 v1, s16
+; GFX8-NEXT: v_cndmask_b32_e64 v5, v2, 0, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s12
+; GFX8-NEXT: s_cmp_lg_u32 s3, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v4, v1, 0, vcc
+; GFX8-NEXT: v_mov_b32_e32 v0, s18
+; GFX8-NEXT: v_mov_b32_e32 v1, s19
+; GFX8-NEXT: v_mov_b32_e32 v3, s13
+; GFX8-NEXT: s_subb_u32 s3, s7, s15
+; GFX8-NEXT: v_cndmask_b32_e64 v6, v0, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v7, v1, 0, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GFX8-NEXT: v_mov_b32_e32 v0, s14
+; GFX8-NEXT: s_cmp_eq_u64 s[6:7], s[14:15]
+; GFX8-NEXT: v_mov_b32_e32 v1, s15
+; GFX8-NEXT: s_cselect_b32 s8, 1, 0
+; GFX8-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX8-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GFX8-NEXT: s_and_b32 s4, 1, s8
+; GFX8-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX8-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX8-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX8-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX8-NEXT: v_mov_b32_e32 v1, s0
+; GFX8-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX8-NEXT: v_mov_b32_e32 v2, s1
+; GFX8-NEXT: v_mov_b32_e32 v3, s3
+; GFX8-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX8-NEXT: v_mov_b32_e32 v2, s2
+; GFX8-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX8-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX8-NEXT: v_readfirstlane_b32 s0, v4
+; GFX8-NEXT: v_readfirstlane_b32 s1, v5
+; GFX8-NEXT: v_readfirstlane_b32 s2, v6
+; GFX8-NEXT: v_readfirstlane_b32 s3, v7
+; GFX8-NEXT: v_readfirstlane_b32 s4, v0
+; GFX8-NEXT: v_readfirstlane_b32 s5, v1
+; GFX8-NEXT: v_readfirstlane_b32 s6, v2
+; GFX8-NEXT: v_readfirstlane_b32 s7, v3
+; GFX8-NEXT: ; return to shader part epilog
+;
+; GFX9-LABEL: s_usubsat_v2i128:
+; GFX9: ; %bb.0:
+; GFX9-NEXT: s_sub_u32 s16, s0, s8
+; GFX9-NEXT: s_cselect_b32 s17, 1, 0
+; GFX9-NEXT: s_and_b32 s17, s17, 1
+; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_subb_u32 s17, s1, s9
+; GFX9-NEXT: s_cselect_b32 s18, 1, 0
+; GFX9-NEXT: s_and_b32 s18, s18, 1
+; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_subb_u32 s18, s2, s10
+; GFX9-NEXT: s_cselect_b32 s19, 1, 0
+; GFX9-NEXT: s_and_b32 s19, s19, 1
+; GFX9-NEXT: v_mov_b32_e32 v2, s8
+; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: v_mov_b32_e32 v3, s9
+; GFX9-NEXT: s_subb_u32 s19, s3, s11
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s10
+; GFX9-NEXT: v_mov_b32_e32 v1, s11
+; GFX9-NEXT: s_cmp_eq_u64 s[2:3], s[10:11]
+; GFX9-NEXT: s_cselect_b32 s10, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[2:3], v[0:1]
+; GFX9-NEXT: s_and_b32 s0, 1, s10
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s0
+; GFX9-NEXT: s_sub_u32 s0, s4, s12
+; GFX9-NEXT: s_cselect_b32 s1, 1, 0
+; GFX9-NEXT: s_and_b32 s1, s1, 1
+; GFX9-NEXT: s_cmp_lg_u32 s1, 0
+; GFX9-NEXT: s_subb_u32 s1, s5, s13
+; GFX9-NEXT: s_cselect_b32 s2, 1, 0
+; GFX9-NEXT: s_and_b32 s2, s2, 1
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: s_cmp_lg_u32 s2, 0
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: s_subb_u32 s2, s6, s14
+; GFX9-NEXT: s_cselect_b32 s3, 1, 0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, s17
+; GFX9-NEXT: s_and_b32 s3, s3, 1
+; GFX9-NEXT: v_mov_b32_e32 v1, s16
+; GFX9-NEXT: v_cndmask_b32_e64 v5, v2, 0, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s12
+; GFX9-NEXT: s_cmp_lg_u32 s3, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v4, v1, 0, vcc
+; GFX9-NEXT: v_mov_b32_e32 v0, s18
+; GFX9-NEXT: v_mov_b32_e32 v1, s19
+; GFX9-NEXT: v_mov_b32_e32 v3, s13
+; GFX9-NEXT: s_subb_u32 s3, s7, s15
+; GFX9-NEXT: v_cndmask_b32_e64 v6, v0, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v7, v1, 0, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[4:5], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v0, s14
+; GFX9-NEXT: s_cmp_eq_u64 s[6:7], s[14:15]
+; GFX9-NEXT: v_mov_b32_e32 v1, s15
+; GFX9-NEXT: s_cselect_b32 s8, 1, 0
+; GFX9-NEXT: v_cndmask_b32_e64 v2, 0, 1, vcc
+; GFX9-NEXT: v_cmp_lt_u64_e32 vcc, s[6:7], v[0:1]
+; GFX9-NEXT: s_and_b32 s4, 1, s8
+; GFX9-NEXT: v_cndmask_b32_e64 v0, 0, 1, vcc
+; GFX9-NEXT: v_cmp_ne_u32_e64 vcc, 0, s4
+; GFX9-NEXT: v_cndmask_b32_e32 v0, v0, v2, vcc
+; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_mov_b32_e32 v2, s1
+; GFX9-NEXT: v_mov_b32_e32 v3, s3
+; GFX9-NEXT: v_cndmask_b32_e64 v0, v1, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v1, v2, 0, vcc
+; GFX9-NEXT: v_mov_b32_e32 v2, s2
+; GFX9-NEXT: v_cndmask_b32_e64 v2, v2, 0, vcc
+; GFX9-NEXT: v_cndmask_b32_e64 v3, v3, 0, vcc
+; GFX9-NEXT: v_readfirstlane_b32 s0, v4
+; GFX9-NEXT: v_readfirstlane_b32 s1, v5
+; GFX9-NEXT: v_readfirstlane_b32 s2, v6
+; GFX9-NEXT: v_readfirstlane_b32 s3, v7
+; GFX9-NEXT: v_readfirstlane_b32 s4, v0
+; GFX9-NEXT: v_readfirstlane_b32 s5, v1
+; GFX9-NEXT: v_readfirstlane_b32 s6, v2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v3
+; GFX9-NEXT: ; return to shader part epilog
+;
+; GFX10-LABEL: s_usubsat_v2i128:
+; GFX10: ; %bb.0:
+; GFX10-NEXT: s_sub_u32 s16, s0, s8
+; GFX10-NEXT: s_cselect_b32 s17, 1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s0, s[0:1], s[8:9]
+; GFX10-NEXT: s_and_b32 s17, s17, 1
+; GFX10-NEXT: ; implicit-def: $vcc_hi
+; GFX10-NEXT: s_cmp_lg_u32 s17, 0
+; GFX10-NEXT: s_subb_u32 s17, s1, s9
+; GFX10-NEXT: s_cselect_b32 s18, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v0, 0, 1, s0
+; GFX10-NEXT: s_and_b32 s18, s18, 1
+; GFX10-NEXT: s_cmp_lg_u32 s18, 0
+; GFX10-NEXT: s_subb_u32 s18, s2, s10
+; GFX10-NEXT: s_cselect_b32 s19, 1, 0
+; GFX10-NEXT: s_and_b32 s19, s19, 1
+; GFX10-NEXT: s_cmp_lg_u32 s19, 0
+; GFX10-NEXT: s_subb_u32 s19, s3, s11
+; GFX10-NEXT: s_cmp_eq_u64 s[2:3], s[10:11]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s2, s[2:3], s[10:11]
+; GFX10-NEXT: s_cselect_b32 s20, 1, 0
+; GFX10-NEXT: s_and_b32 s0, 1, s20
+; GFX10-NEXT: s_sub_u32 s8, s4, s12
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: s_and_b32 s1, s1, 1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s2
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: s_subb_u32 s3, s5, s13
+; GFX10-NEXT: s_cselect_b32 s1, 1, 0
+; GFX10-NEXT: v_cndmask_b32_e32 v0, v1, v0, vcc_lo
+; GFX10-NEXT: s_and_b32 s1, s1, 1
+; GFX10-NEXT: s_cmp_lg_u32 s1, 0
+; GFX10-NEXT: v_cmp_lt_u64_e64 s1, s[4:5], s[12:13]
+; GFX10-NEXT: s_subb_u32 s30, s6, s14
+; GFX10-NEXT: s_cselect_b32 s0, 1, 0
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v0
+; GFX10-NEXT: s_and_b32 s0, s0, 1
+; GFX10-NEXT: s_cmp_lg_u32 s0, 0
+; GFX10-NEXT: v_cndmask_b32_e64 v1, 0, 1, s1
+; GFX10-NEXT: s_subb_u32 s9, s7, s15
+; GFX10-NEXT: s_cmp_eq_u64 s[6:7], s[14:15]
+; GFX10-NEXT: v_cmp_lt_u64_e64 s1, s[6:7], s[14:15]
+; GFX10-NEXT: s_cselect_b32 s0, 1, 0
+; GFX10-NEXT: s_and_b32 s0, 1, s0
+; GFX10-NEXT: v_cmp_ne_u32_e64 vcc_lo, 0, s0
+; GFX10-NEXT: v_cndmask_b32_e64 v2, 0, 1, s1
+; GFX10-NEXT: v_cndmask_b32_e32 v1, v2, v1, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_and_b32_e32 v0, 1, v1
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s16, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s17, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s18, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v4, s19, 0, vcc_lo
+; GFX10-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX10-NEXT: v_readfirstlane_b32 s0, v1
+; GFX10-NEXT: v_readfirstlane_b32 s1, v2
+; GFX10-NEXT: v_readfirstlane_b32 s2, v3
+; GFX10-NEXT: v_cndmask_b32_e64 v1, s3, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v0, s8, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v2, s30, 0, vcc_lo
+; GFX10-NEXT: v_cndmask_b32_e64 v3, s9, 0, vcc_lo
+; GFX10-NEXT: v_readfirstlane_b32 s3, v4
+; GFX10-NEXT: v_readfirstlane_b32 s5, v1
+; GFX10-NEXT: v_readfirstlane_b32 s4, v0
+; GFX10-NEXT: v_readfirstlane_b32 s6, v2
+; GFX10-NEXT: v_readfirstlane_b32 s7, v3
+; GFX10-NEXT: ; return to shader part epilog
+ %result = call <2 x i128> @llvm.usub.sat.v2i128(<2 x i128> %lhs, <2 x i128> %rhs)
+ ret <2 x i128> %result
+}
+
+declare i7 @llvm.usub.sat.i7(i7, i7) #0
+declare i8 @llvm.usub.sat.i8(i8, i8) #0
+declare <2 x i8> @llvm.usub.sat.v2i8(<2 x i8>, <2 x i8>) #0
+declare <4 x i8> @llvm.usub.sat.v4i8(<4 x i8>, <4 x i8>) #0
+
+declare i16 @llvm.usub.sat.i16(i16, i16) #0
+declare <2 x i16> @llvm.usub.sat.v2i16(<2 x i16>, <2 x i16>) #0
+declare <3 x i16> @llvm.usub.sat.v3i16(<3 x i16>, <3 x i16>) #0
+declare <4 x i16> @llvm.usub.sat.v4i16(<4 x i16>, <4 x i16>) #0
+declare <5 x i16> @llvm.usub.sat.v5i16(<5 x i16>, <5 x i16>) #0
+declare <6 x i16> @llvm.usub.sat.v6i16(<6 x i16>, <6 x i16>) #0
+declare <8 x i16> @llvm.usub.sat.v8i16(<8 x i16>, <8 x i16>) #0
+
+declare i24 @llvm.usub.sat.i24(i24, i24) #0
+
+declare i32 @llvm.usub.sat.i32(i32, i32) #0
+declare <2 x i32> @llvm.usub.sat.v2i32(<2 x i32>, <2 x i32>) #0
+declare <3 x i32> @llvm.usub.sat.v3i32(<3 x i32>, <3 x i32>) #0
+declare <4 x i32> @llvm.usub.sat.v4i32(<4 x i32>, <4 x i32>) #0
+declare <5 x i32> @llvm.usub.sat.v5i32(<5 x i32>, <5 x i32>) #0
+declare <16 x i32> @llvm.usub.sat.v16i32(<16 x i32>, <16 x i32>) #0
+
+declare i48 @llvm.usub.sat.i48(i48, i48) #0
+
+declare i64 @llvm.usub.sat.i64(i64, i64) #0
+declare <2 x i64> @llvm.usub.sat.v2i64(<2 x i64>, <2 x i64>) #0
+
+declare i128 @llvm.usub.sat.i128(i128, i128) #0
+declare <2 x i128> @llvm.usub.sat.v2i128(<2 x i128>, <2 x i128>) #0
+
+attributes #0 = { nounwind readnone speculatable willreturn }
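For readers tracing the checks above: every usubsat test clamps the wrapping difference to zero whenever the subtraction borrows, which is the select-to-zero pattern visible in the generated v_cndmask_b32 sequences (subtract with borrow, compare lhs < rhs, then select 0 on overflow). A minimal IR sketch of that semantics follows; it is illustrative only, the function name @usubsat_reference is hypothetical, and this is a reference for the intrinsic's meaning rather than the code the legalizer emits:

define i128 @usubsat_reference(i128 %lhs, i128 %rhs) {
  ; wrapping difference
  %diff = sub i128 %lhs, %rhs
  ; borrow: the subtract underflows iff lhs < rhs (unsigned)
  %borrow = icmp ult i128 %lhs, %rhs
  ; clamp to zero on borrow, matching the v_cndmask_b32 selects in the checks above
  %res = select i1 %borrow, i128 0, i128 %diff
  ret i128 %res
}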