[llvm] r350971 - [ConstantFolding] Fold undef for integer intrinsics

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 11 13:18:00 PST 2019


Author: nikic
Date: Fri Jan 11 13:18:00 2019
New Revision: 350971

URL: http://llvm.org/viewvc/llvm-project?rev=350971&view=rev
Log:
[ConstantFolding] Fold undef for integer intrinsics

This fixes https://bugs.llvm.org/show_bug.cgi?id=40110.

This implements handling of undef operands for integer intrinsics in
ConstantFolding, in particular for the bitcounting intrinsics (ctpop,
cttz, ctlz), the with.overflow intrinsics, the saturating math
intrinsics and the funnel shift intrinsics.

The undef behavior follows what InstSimplify does for the general case
of non-constant operands. For the bitcount intrinsics (where
InstSimplify doesn't do undef handling -- there cannot be a combination
of an undef + non-constant operand) I'm using a 0 result if the intrinsic
is defined for zero and undef otherwise.

Differential Revision: https://reviews.llvm.org/D55950

Removed:
    llvm/trunk/test/Transforms/InstCombine/saturating-add-sub-vector.ll
Modified:
    llvm/trunk/lib/Analysis/ConstantFolding.cpp
    llvm/trunk/test/Analysis/ConstantFolding/bitcount.ll
    llvm/trunk/test/Analysis/ConstantFolding/funnel-shift.ll
    llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll
    llvm/trunk/test/Transforms/ConstProp/overflow-ops.ll

Modified: llvm/trunk/lib/Analysis/ConstantFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ConstantFolding.cpp?rev=350971&r1=350970&r2=350971&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ConstantFolding.cpp (original)
+++ llvm/trunk/lib/Analysis/ConstantFolding.cpp Fri Jan 11 13:18:00 2019
@@ -1629,6 +1629,18 @@ static bool isManifestConstant(const Con
   return false;
 }
 
+static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
+  if (auto *CI = dyn_cast<ConstantInt>(Op)) {
+    C = &CI->getValue();
+    return true;
+  }
+  if (isa<UndefValue>(Op)) {
+    C = nullptr;
+    return true;
+  }
+  return false;
+}
+
 Constant *ConstantFoldScalarCall(StringRef Name, unsigned IntrinsicID, Type *Ty,
                                  ArrayRef<Constant *> Operands,
                                  const TargetLibraryInfo *TLI,
@@ -1643,8 +1655,10 @@ Constant *ConstantFoldScalarCall(StringR
       return nullptr;
     }
     if (isa<UndefValue>(Operands[0])) {
-      // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN
-      if (IntrinsicID == Intrinsic::cos)
+      // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
+      // ctpop() is between 0 and bitwidth, pick 0 for undef.
+      if (IntrinsicID == Intrinsic::cos ||
+          IntrinsicID == Intrinsic::ctpop)
         return Constant::getNullValue(Ty);
       if (IntrinsicID == Intrinsic::bswap ||
           IntrinsicID == Intrinsic::bitreverse ||
@@ -1995,62 +2009,92 @@ Constant *ConstantFoldScalarCall(StringR
       return nullptr;
     }
 
-    if (auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
-      if (auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
+    if (Operands[0]->getType()->isIntegerTy() &&
+        Operands[1]->getType()->isIntegerTy()) {
+      const APInt *C0, *C1;
+      if (!getConstIntOrUndef(Operands[0], C0) ||
+          !getConstIntOrUndef(Operands[1], C1))
+        return nullptr;
+
+      switch (IntrinsicID) {
+      default: break;
+      case Intrinsic::smul_with_overflow:
+      case Intrinsic::umul_with_overflow:
+        // Even if both operands are undef, we cannot fold muls to undef
+        // in the general case. For example, on i2 there are no inputs
+        // that would produce { i2 -1, i1 true } as the result.
+        if (!C0 || !C1)
+          return Constant::getNullValue(Ty);
+        LLVM_FALLTHROUGH;
+      case Intrinsic::sadd_with_overflow:
+      case Intrinsic::uadd_with_overflow:
+      case Intrinsic::ssub_with_overflow:
+      case Intrinsic::usub_with_overflow: {
+        if (!C0 || !C1)
+          return UndefValue::get(Ty);
+
+        APInt Res;
+        bool Overflow;
         switch (IntrinsicID) {
-        default: break;
+        default: llvm_unreachable("Invalid case");
         case Intrinsic::sadd_with_overflow:
+          Res = C0->sadd_ov(*C1, Overflow);
+          break;
         case Intrinsic::uadd_with_overflow:
+          Res = C0->uadd_ov(*C1, Overflow);
+          break;
         case Intrinsic::ssub_with_overflow:
+          Res = C0->ssub_ov(*C1, Overflow);
+          break;
         case Intrinsic::usub_with_overflow:
+          Res = C0->usub_ov(*C1, Overflow);
+          break;
         case Intrinsic::smul_with_overflow:
-        case Intrinsic::umul_with_overflow: {
-          APInt Res;
-          bool Overflow;
-          switch (IntrinsicID) {
-          default: llvm_unreachable("Invalid case");
-          case Intrinsic::sadd_with_overflow:
-            Res = Op1->getValue().sadd_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::uadd_with_overflow:
-            Res = Op1->getValue().uadd_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::ssub_with_overflow:
-            Res = Op1->getValue().ssub_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::usub_with_overflow:
-            Res = Op1->getValue().usub_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::smul_with_overflow:
-            Res = Op1->getValue().smul_ov(Op2->getValue(), Overflow);
-            break;
-          case Intrinsic::umul_with_overflow:
-            Res = Op1->getValue().umul_ov(Op2->getValue(), Overflow);
-            break;
-          }
-          Constant *Ops[] = {
-            ConstantInt::get(Ty->getContext(), Res),
-            ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
-          };
-          return ConstantStruct::get(cast<StructType>(Ty), Ops);
-        }
-        case Intrinsic::uadd_sat:
-          return ConstantInt::get(Ty, Op1->getValue().uadd_sat(Op2->getValue()));
-        case Intrinsic::sadd_sat:
-          return ConstantInt::get(Ty, Op1->getValue().sadd_sat(Op2->getValue()));
-        case Intrinsic::usub_sat:
-          return ConstantInt::get(Ty, Op1->getValue().usub_sat(Op2->getValue()));
-        case Intrinsic::ssub_sat:
-          return ConstantInt::get(Ty, Op1->getValue().ssub_sat(Op2->getValue()));
-        case Intrinsic::cttz:
-          if (Op2->isOne() && Op1->isZero()) // cttz(0, 1) is undef.
-            return UndefValue::get(Ty);
-          return ConstantInt::get(Ty, Op1->getValue().countTrailingZeros());
-        case Intrinsic::ctlz:
-          if (Op2->isOne() && Op1->isZero()) // ctlz(0, 1) is undef.
-            return UndefValue::get(Ty);
-          return ConstantInt::get(Ty, Op1->getValue().countLeadingZeros());
-        }
+          Res = C0->smul_ov(*C1, Overflow);
+          break;
+        case Intrinsic::umul_with_overflow:
+          Res = C0->umul_ov(*C1, Overflow);
+          break;
+        }
+        Constant *Ops[] = {
+          ConstantInt::get(Ty->getContext(), Res),
+          ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
+        };
+        return ConstantStruct::get(cast<StructType>(Ty), Ops);
+      }
+      case Intrinsic::uadd_sat:
+      case Intrinsic::sadd_sat:
+        if (!C0 && !C1)
+          return UndefValue::get(Ty);
+        if (!C0 || !C1)
+          return Constant::getAllOnesValue(Ty);
+        if (IntrinsicID == Intrinsic::uadd_sat)
+          return ConstantInt::get(Ty, C0->uadd_sat(*C1));
+        else
+          return ConstantInt::get(Ty, C0->sadd_sat(*C1));
+      case Intrinsic::usub_sat:
+      case Intrinsic::ssub_sat:
+        if (!C0 && !C1)
+          return UndefValue::get(Ty);
+        if (!C0 || !C1)
+          return Constant::getNullValue(Ty);
+        if (IntrinsicID == Intrinsic::usub_sat)
+          return ConstantInt::get(Ty, C0->usub_sat(*C1));
+        else
+          return ConstantInt::get(Ty, C0->ssub_sat(*C1));
+      case Intrinsic::cttz:
+      case Intrinsic::ctlz:
+        assert(C1 && "Must be constant int");
+
+        // cttz(0, 1) and ctlz(0, 1) are undef.
+        if (C1->isOneValue() && (!C0 || C0->isNullValue()))
+          return UndefValue::get(Ty);
+        if (!C0)
+          return Constant::getNullValue(Ty);
+        if (IntrinsicID == Intrinsic::cttz)
+          return ConstantInt::get(Ty, C0->countTrailingZeros());
+        else
+          return ConstantInt::get(Ty, C0->countLeadingZeros());
       }
 
       return nullptr;
@@ -2136,26 +2180,33 @@ Constant *ConstantFoldScalarCall(StringR
   }
 
   if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
-    auto *C0 = dyn_cast<ConstantInt>(Operands[0]);
-    auto *C1 = dyn_cast<ConstantInt>(Operands[1]);
-    auto *C2 = dyn_cast<ConstantInt>(Operands[2]);
-    if (!(C0 && C1 && C2))
+    const APInt *C0, *C1, *C2;
+    if (!getConstIntOrUndef(Operands[0], C0) ||
+        !getConstIntOrUndef(Operands[1], C1) ||
+        !getConstIntOrUndef(Operands[2], C2))
       return nullptr;
 
+    bool IsRight = IntrinsicID == Intrinsic::fshr;
+    if (!C2)
+      return Operands[IsRight ? 1 : 0];
+    if (!C0 && !C1)
+      return UndefValue::get(Ty);
+
     // The shift amount is interpreted as modulo the bitwidth. If the shift
     // amount is effectively 0, avoid UB due to oversized inverse shift below.
-    unsigned BitWidth = C0->getBitWidth();
-    unsigned ShAmt = C2->getValue().urem(BitWidth);
-    bool IsRight = IntrinsicID == Intrinsic::fshr;
+    unsigned BitWidth = C2->getBitWidth();
+    unsigned ShAmt = C2->urem(BitWidth);
     if (!ShAmt)
-      return IsRight ? C1 : C0;
+      return Operands[IsRight ? 1 : 0];
 
-    // (X << ShlAmt) | (Y >> LshrAmt)
-    const APInt &X = C0->getValue();
-    const APInt &Y = C1->getValue();
+    // (C0 << ShlAmt) | (C1 >> LshrAmt)
     unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
     unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
-    return ConstantInt::get(Ty->getContext(), X.shl(ShlAmt) | Y.lshr(LshrAmt));
+    if (!C0)
+      return ConstantInt::get(Ty, C1->lshr(LshrAmt));
+    if (!C1)
+      return ConstantInt::get(Ty, C0->shl(ShlAmt));
+    return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
   }
 
   return nullptr;

Modified: llvm/trunk/test/Analysis/ConstantFolding/bitcount.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ConstantFolding/bitcount.ll?rev=350971&r1=350970&r2=350971&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ConstantFolding/bitcount.ll (original)
+++ llvm/trunk/test/Analysis/ConstantFolding/bitcount.ll Fri Jan 11 13:18:00 2019
@@ -74,8 +74,7 @@ define i33 @ctlz_zero_undefined() {
 
 define i31 @ctpop_undef() {
 ; CHECK-LABEL: @ctpop_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i31 @llvm.ctpop.i31(i31 undef)
-; CHECK-NEXT:    ret i31 [[X]]
+; CHECK-NEXT:    ret i31 0
 ;
   %x = call i31 @llvm.ctpop.i31(i31 undef)
   ret i31 %x
@@ -83,8 +82,7 @@ define i31 @ctpop_undef() {
 
 define i32 @cttz_undef_defined() {
 ; CHECK-LABEL: @cttz_undef_defined(
-; CHECK-NEXT:    [[X:%.*]] = call i32 @llvm.cttz.i32(i32 undef, i1 false)
-; CHECK-NEXT:    ret i32 [[X]]
+; CHECK-NEXT:    ret i32 0
 ;
   %x = call i32 @llvm.cttz.i32(i32 undef, i1 false)
   ret i32 %x
@@ -92,8 +90,7 @@ define i32 @cttz_undef_defined() {
 
 define i32 @cttz_undef_undefined() {
 ; CHECK-LABEL: @cttz_undef_undefined(
-; CHECK-NEXT:    [[X:%.*]] = call i32 @llvm.cttz.i32(i32 undef, i1 true)
-; CHECK-NEXT:    ret i32 [[X]]
+; CHECK-NEXT:    ret i32 undef
 ;
   %x = call i32 @llvm.cttz.i32(i32 undef, i1 true)
   ret i32 %x
@@ -101,8 +98,7 @@ define i32 @cttz_undef_undefined() {
 
 define i33 @ctlz_undef_defined() {
 ; CHECK-LABEL: @ctlz_undef_defined(
-; CHECK-NEXT:    [[X:%.*]] = call i33 @llvm.ctlz.i33(i33 undef, i1 false)
-; CHECK-NEXT:    ret i33 [[X]]
+; CHECK-NEXT:    ret i33 0
 ;
   %x = call i33 @llvm.ctlz.i33(i33 undef, i1 false)
   ret i33 %x
@@ -110,8 +106,7 @@ define i33 @ctlz_undef_defined() {
 
 define i33 @ctlz_undef_undefined() {
 ; CHECK-LABEL: @ctlz_undef_undefined(
-; CHECK-NEXT:    [[X:%.*]] = call i33 @llvm.ctlz.i33(i33 undef, i1 true)
-; CHECK-NEXT:    ret i33 [[X]]
+; CHECK-NEXT:    ret i33 undef
 ;
   %x = call i33 @llvm.ctlz.i33(i33 undef, i1 true)
   ret i33 %x
@@ -127,8 +122,7 @@ define <2 x i31> @ctpop_vector() {
 
 define <2 x i31> @ctpop_vector_undef() {
 ; CHECK-LABEL: @ctpop_vector_undef(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i31> @llvm.ctpop.v2i31(<2 x i31> <i31 0, i31 undef>)
-; CHECK-NEXT:    ret <2 x i31> [[X]]
+; CHECK-NEXT:    ret <2 x i31> zeroinitializer
 ;
   %x = call <2 x i31> @llvm.ctpop.v2i31(<2 x i31> <i31 0, i31 undef>)
   ret <2 x i31> %x
@@ -144,8 +138,7 @@ define <2 x i32> @cttz_vector() {
 
 define <2 x i32> @cttz_vector_undef_defined() {
 ; CHECK-LABEL: @cttz_vector_undef_defined(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> <i32 0, i32 undef>, i1 false)
-; CHECK-NEXT:    ret <2 x i32> [[X]]
+; CHECK-NEXT:    ret <2 x i32> <i32 32, i32 0>
 ;
   %x = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> <i32 0, i32 undef>, i1 false)
   ret <2 x i32> %x
@@ -153,8 +146,7 @@ define <2 x i32> @cttz_vector_undef_defi
 
 define <2 x i32> @cttz_vector_undef_undefined() {
 ; CHECK-LABEL: @cttz_vector_undef_undefined(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> <i32 0, i32 undef>, i1 true)
-; CHECK-NEXT:    ret <2 x i32> [[X]]
+; CHECK-NEXT:    ret <2 x i32> undef
 ;
   %x = call <2 x i32> @llvm.cttz.v2i32(<2 x i32> <i32 0, i32 undef>, i1 true)
   ret <2 x i32> %x
@@ -170,8 +162,7 @@ define <2 x i33> @ctlz_vector() {
 
 define <2 x i33> @ctlz_vector_undef_defined() {
 ; CHECK-LABEL: @ctlz_vector_undef_defined(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> <i33 0, i33 undef>, i1 false)
-; CHECK-NEXT:    ret <2 x i33> [[X]]
+; CHECK-NEXT:    ret <2 x i33> <i33 33, i33 0>
 ;
   %x = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> <i33 0, i33 undef>, i1 false)
   ret <2 x i33> %x
@@ -179,8 +170,7 @@ define <2 x i33> @ctlz_vector_undef_defi
 
 define <2 x i33> @ctlz_vector_undef_undefined() {
 ; CHECK-LABEL: @ctlz_vector_undef_undefined(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> <i33 0, i33 undef>, i1 true)
-; CHECK-NEXT:    ret <2 x i33> [[X]]
+; CHECK-NEXT:    ret <2 x i33> undef
 ;
   %x = call <2 x i33> @llvm.ctlz.v2i33(<2 x i33> <i33 0, i33 undef>, i1 true)
   ret <2 x i33> %x

Modified: llvm/trunk/test/Analysis/ConstantFolding/funnel-shift.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ConstantFolding/funnel-shift.ll?rev=350971&r1=350970&r2=350971&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ConstantFolding/funnel-shift.ll (original)
+++ llvm/trunk/test/Analysis/ConstantFolding/funnel-shift.ll Fri Jan 11 13:18:00 2019
@@ -85,8 +85,7 @@ define <4 x i8> @fshr_v4i8() {
 
 define i32 @fshl_scalar_all_undef() {
 ; CHECK-LABEL: @fshl_scalar_all_undef(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 undef)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 undef
 ;
   %f = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 undef)
   ret i32 %f
@@ -94,8 +93,7 @@ define i32 @fshl_scalar_all_undef() {
 
 define i32 @fshr_scalar_all_undef() {
 ; CHECK-LABEL: @fshr_scalar_all_undef(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 undef)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 undef
 ;
   %f = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 undef)
   ret i32 %f
@@ -103,8 +101,7 @@ define i32 @fshr_scalar_all_undef() {
 
 define i32 @fshl_scalar_undef_shamt() {
 ; CHECK-LABEL: @fshl_scalar_undef_shamt(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 1, i32 2, i32 undef)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 1
 ;
   %f = call i32 @llvm.fshl.i32(i32 1, i32 2, i32 undef)
   ret i32 %f
@@ -112,8 +109,7 @@ define i32 @fshl_scalar_undef_shamt() {
 
 define i32 @fshr_scalar_undef_shamt() {
 ; CHECK-LABEL: @fshr_scalar_undef_shamt(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 1, i32 2, i32 undef)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 2
 ;
   %f = call i32 @llvm.fshr.i32(i32 1, i32 2, i32 undef)
   ret i32 %f
@@ -121,8 +117,7 @@ define i32 @fshr_scalar_undef_shamt() {
 
 define i32 @fshl_scalar_undef_ops() {
 ; CHECK-LABEL: @fshl_scalar_undef_ops(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 7)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 undef
 ;
   %f = call i32 @llvm.fshl.i32(i32 undef, i32 undef, i32 7)
   ret i32 %f
@@ -130,8 +125,7 @@ define i32 @fshl_scalar_undef_ops() {
 
 define i32 @fshr_scalar_undef_ops() {
 ; CHECK-LABEL: @fshr_scalar_undef_ops(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 7)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 undef
 ;
   %f = call i32 @llvm.fshr.i32(i32 undef, i32 undef, i32 7)
   ret i32 %f
@@ -139,8 +133,7 @@ define i32 @fshr_scalar_undef_ops() {
 
 define i32 @fshl_scalar_undef_op1_zero_shift() {
 ; CHECK-LABEL: @fshl_scalar_undef_op1_zero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 1, i32 0)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 undef
 ;
   %f = call i32 @llvm.fshl.i32(i32 undef, i32 1, i32 0)
   ret i32 %f
@@ -148,8 +141,7 @@ define i32 @fshl_scalar_undef_op1_zero_s
 
 define i32 @fshl_scalar_undef_op2_zero_shift() {
 ; CHECK-LABEL: @fshl_scalar_undef_op2_zero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 1, i32 undef, i32 32)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 1
 ;
   %f = call i32 @llvm.fshl.i32(i32 1, i32 undef, i32 32)
   ret i32 %f
@@ -157,8 +149,7 @@ define i32 @fshl_scalar_undef_op2_zero_s
 
 define i32 @fshr_scalar_undef_op1_zero_shift() {
 ; CHECK-LABEL: @fshr_scalar_undef_op1_zero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 1, i32 64)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 1
 ;
   %f = call i32 @llvm.fshr.i32(i32 undef, i32 1, i32 64)
   ret i32 %f
@@ -166,8 +157,7 @@ define i32 @fshr_scalar_undef_op1_zero_s
 
 define i32 @fshr_scalar_undef_op2_zero_shift() {
 ; CHECK-LABEL: @fshr_scalar_undef_op2_zero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 1, i32 undef, i32 0)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 undef
 ;
   %f = call i32 @llvm.fshr.i32(i32 1, i32 undef, i32 0)
   ret i32 %f
@@ -175,8 +165,7 @@ define i32 @fshr_scalar_undef_op2_zero_s
 
 define i32 @fshl_scalar_undef_op1_nonzero_shift() {
 ; CHECK-LABEL: @fshl_scalar_undef_op1_nonzero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 undef, i32 -1, i32 8)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 255
 ;
   %f = call i32 @llvm.fshl.i32(i32 undef, i32 -1, i32 8)
   ret i32 %f
@@ -184,8 +173,7 @@ define i32 @fshl_scalar_undef_op1_nonzer
 
 define i32 @fshl_scalar_undef_op2_nonzero_shift() {
 ; CHECK-LABEL: @fshl_scalar_undef_op2_nonzero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshl.i32(i32 -1, i32 undef, i32 8)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 -256
 ;
   %f = call i32 @llvm.fshl.i32(i32 -1, i32 undef, i32 8)
   ret i32 %f
@@ -193,8 +181,7 @@ define i32 @fshl_scalar_undef_op2_nonzer
 
 define i32 @fshr_scalar_undef_op1_nonzero_shift() {
 ; CHECK-LABEL: @fshr_scalar_undef_op1_nonzero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 undef, i32 -1, i32 8)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 16777215
 ;
   %f = call i32 @llvm.fshr.i32(i32 undef, i32 -1, i32 8)
   ret i32 %f
@@ -202,8 +189,7 @@ define i32 @fshr_scalar_undef_op1_nonzer
 
 define i32 @fshr_scalar_undef_op2_nonzero_shift() {
 ; CHECK-LABEL: @fshr_scalar_undef_op2_nonzero_shift(
-; CHECK-NEXT:    [[F:%.*]] = call i32 @llvm.fshr.i32(i32 -1, i32 undef, i32 8)
-; CHECK-NEXT:    ret i32 [[F]]
+; CHECK-NEXT:    ret i32 -16777216
 ;
   %f = call i32 @llvm.fshr.i32(i32 -1, i32 undef, i32 8)
   ret i32 %f
@@ -212,8 +198,7 @@ define i32 @fshr_scalar_undef_op2_nonzer
 ; Undef/Undef/Undef; 1/2/Undef; Undef/Undef/3; Undef/1/0
 define <4 x i8> @fshl_vector_mix1() {
 ; CHECK-LABEL: @fshl_vector_mix1(
-; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
-; CHECK-NEXT:    ret <4 x i8> [[F]]
+; CHECK-NEXT:    ret <4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>
 ;
   %f = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
   ret <4 x i8> %f
@@ -222,8 +207,7 @@ define <4 x i8> @fshl_vector_mix1() {
 ; 1/Undef/8; Undef/-1/2; -1/Undef/2; 7/8/4
 define <4 x i8> @fshl_vector_mix2() {
 ; CHECK-LABEL: @fshl_vector_mix2(
-; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
-; CHECK-NEXT:    ret <4 x i8> [[F]]
+; CHECK-NEXT:    ret <4 x i8> <i8 1, i8 3, i8 -4, i8 112>
 ;
   %f = call <4 x i8> @llvm.fshl.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
   ret <4 x i8> %f
@@ -232,8 +216,7 @@ define <4 x i8> @fshl_vector_mix2() {
 ; Undef/Undef/Undef; 1/2/Undef; Undef/Undef/3; Undef/1/0
 define <4 x i8> @fshr_vector_mix1() {
 ; CHECK-LABEL: @fshr_vector_mix1(
-; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
-; CHECK-NEXT:    ret <4 x i8> [[F]]
+; CHECK-NEXT:    ret <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>
 ;
   %f = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 undef, i8 1, i8 undef, i8 undef>, <4 x i8> <i8 undef, i8 2, i8 undef, i8 1>, <4 x i8> <i8 undef, i8 undef, i8 3, i8 0>)
   ret <4 x i8> %f
@@ -242,8 +225,7 @@ define <4 x i8> @fshr_vector_mix1() {
 ; 1/Undef/8; Undef/-1/2; -1/Undef/2; 7/8/4
 define <4 x i8> @fshr_vector_mix2() {
 ; CHECK-LABEL: @fshr_vector_mix2(
-; CHECK-NEXT:    [[F:%.*]] = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
-; CHECK-NEXT:    ret <4 x i8> [[F]]
+; CHECK-NEXT:    ret <4 x i8> <i8 undef, i8 63, i8 -64, i8 112>
 ;
   %f = call <4 x i8> @llvm.fshr.v4i8(<4 x i8> <i8 1, i8 undef, i8 -1, i8 7>, <4 x i8> <i8 undef, i8 -1, i8 undef, i8 8>, <4 x i8> <i8 8, i8 2, i8 2, i8 4>)
   ret <4 x i8> %f

Modified: llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll?rev=350971&r1=350970&r2=350971&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll (original)
+++ llvm/trunk/test/Analysis/ConstantFolding/saturating-add-sub.ll Fri Jan 11 13:18:00 2019
@@ -175,8 +175,7 @@ define <2 x i8> @test_ssub_vector_sat_ne
 
 define i8 @test_uadd_scalar_both_undef() {
 ; CHECK-LABEL: @test_uadd_scalar_both_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 undef
 ;
   %x = call i8 @llvm.uadd.sat.i8(i8 undef, i8 undef)
   ret i8 %x
@@ -184,8 +183,7 @@ define i8 @test_uadd_scalar_both_undef()
 
 define i8 @test_sadd_scalar_both_undef() {
 ; CHECK-LABEL: @test_sadd_scalar_both_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 undef
 ;
   %x = call i8 @llvm.sadd.sat.i8(i8 undef, i8 undef)
   ret i8 %x
@@ -193,8 +191,7 @@ define i8 @test_sadd_scalar_both_undef()
 
 define i8 @test_usub_scalar_both_undef() {
 ; CHECK-LABEL: @test_usub_scalar_both_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 undef
 ;
   %x = call i8 @llvm.usub.sat.i8(i8 undef, i8 undef)
   ret i8 %x
@@ -202,8 +199,7 @@ define i8 @test_usub_scalar_both_undef()
 
 define i8 @test_ssub_scalar_both_undef() {
 ; CHECK-LABEL: @test_ssub_scalar_both_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 undef
 ;
   %x = call i8 @llvm.ssub.sat.i8(i8 undef, i8 undef)
   ret i8 %x
@@ -211,8 +207,7 @@ define i8 @test_ssub_scalar_both_undef()
 
 define i8 @test_uadd_scalar_op2_undef() {
 ; CHECK-LABEL: @test_uadd_scalar_op2_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.uadd.sat.i8(i8 10, i8 undef)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 -1
 ;
   %x = call i8 @llvm.uadd.sat.i8(i8 10, i8 undef)
   ret i8 %x
@@ -220,8 +215,7 @@ define i8 @test_uadd_scalar_op2_undef()
 
 define i8 @test_sadd_scalar_op1_undef() {
 ; CHECK-LABEL: @test_sadd_scalar_op1_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.sadd.sat.i8(i8 undef, i8 10)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 -1
 ;
   %x = call i8 @llvm.sadd.sat.i8(i8 undef, i8 10)
   ret i8 %x
@@ -229,8 +223,7 @@ define i8 @test_sadd_scalar_op1_undef()
 
 define i8 @test_usub_scalar_op2_undef() {
 ; CHECK-LABEL: @test_usub_scalar_op2_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.usub.sat.i8(i8 10, i8 undef)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 0
 ;
   %x = call i8 @llvm.usub.sat.i8(i8 10, i8 undef)
   ret i8 %x
@@ -238,8 +231,7 @@ define i8 @test_usub_scalar_op2_undef()
 
 define i8 @test_usub_scalar_op1_undef() {
 ; CHECK-LABEL: @test_usub_scalar_op1_undef(
-; CHECK-NEXT:    [[X:%.*]] = call i8 @llvm.usub.sat.i8(i8 undef, i8 10)
-; CHECK-NEXT:    ret i8 [[X]]
+; CHECK-NEXT:    ret i8 0
 ;
   %x = call i8 @llvm.usub.sat.i8(i8 undef, i8 10)
   ret i8 %x
@@ -247,8 +239,7 @@ define i8 @test_usub_scalar_op1_undef()
 
 define <2 x i8> @test_uadd_vector_both_undef_splat() {
 ; CHECK-LABEL: @test_uadd_vector_both_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> undef
 ;
   %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
   ret <2 x i8> %x
@@ -256,8 +247,7 @@ define <2 x i8> @test_uadd_vector_both_u
 
 define <2 x i8> @test_sadd_vector_both_undef_splat() {
 ; CHECK-LABEL: @test_sadd_vector_both_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> undef
 ;
   %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
   ret <2 x i8> %x
@@ -265,8 +255,7 @@ define <2 x i8> @test_sadd_vector_both_u
 
 define <2 x i8> @test_usub_vector_both_undef_splat() {
 ; CHECK-LABEL: @test_usub_vector_both_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> undef
 ;
   %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
   ret <2 x i8> %x
@@ -274,8 +263,7 @@ define <2 x i8> @test_usub_vector_both_u
 
 define <2 x i8> @test_ssub_vector_both_undef_splat() {
 ; CHECK-LABEL: @test_ssub_vector_both_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> undef
 ;
   %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> undef)
   ret <2 x i8> %x
@@ -283,8 +271,7 @@ define <2 x i8> @test_ssub_vector_both_u
 
 define <2 x i8> @test_uadd_vector_op2_undef_splat() {
 ; CHECK-LABEL: @test_uadd_vector_op2_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> undef)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
 ;
   %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> undef)
   ret <2 x i8> %x
@@ -292,8 +279,7 @@ define <2 x i8> @test_uadd_vector_op2_un
 
 define <2 x i8> @test_sadd_vector_op1_undef_splat() {
 ; CHECK-LABEL: @test_sadd_vector_op1_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> <i8 10, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
 ;
   %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> undef, <2 x i8> <i8 10, i8 20>)
   ret <2 x i8> %x
@@ -301,8 +287,7 @@ define <2 x i8> @test_sadd_vector_op1_un
 
 define <2 x i8> @test_usub_vector_op2_undef_splat() {
 ; CHECK-LABEL: @test_usub_vector_op2_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> undef)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 20>, <2 x i8> undef)
   ret <2 x i8> %x
@@ -310,8 +295,7 @@ define <2 x i8> @test_usub_vector_op2_un
 
 define <2 x i8> @test_ssub_vector_op1_undef_splat() {
 ; CHECK-LABEL: @test_ssub_vector_op1_undef_splat(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> <i8 10, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> undef, <2 x i8> <i8 10, i8 20>)
   ret <2 x i8> %x
@@ -319,8 +303,7 @@ define <2 x i8> @test_ssub_vector_op1_un
 
 define <2 x i8> @test_uadd_vector_op2_undef_mix1() {
 ; CHECK-LABEL: @test_uadd_vector_op2_undef_mix1(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 20, i8 undef>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 30, i8 undef>
 ;
   %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 20, i8 undef>)
   ret <2 x i8> %x
@@ -328,8 +311,7 @@ define <2 x i8> @test_uadd_vector_op2_un
 
 define <2 x i8> @test_uadd_vector_op2_undef_mix2() {
 ; CHECK-LABEL: @test_uadd_vector_op2_undef_mix2(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 undef, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
 ;
   %x = call <2 x i8> @llvm.uadd.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 undef, i8 20>)
   ret <2 x i8> %x
@@ -337,8 +319,7 @@ define <2 x i8> @test_uadd_vector_op2_un
 
 define <2 x i8> @test_sadd_vector_op1_undef_mix1() {
 ; CHECK-LABEL: @test_sadd_vector_op1_undef_mix1(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 undef, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 undef, i8 30>
 ;
   %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 undef, i8 20>)
   ret <2 x i8> %x
@@ -346,8 +327,7 @@ define <2 x i8> @test_sadd_vector_op1_un
 
 define <2 x i8> @test_sadd_vector_op1_undef_mix2() {
 ; CHECK-LABEL: @test_sadd_vector_op1_undef_mix2(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 20, i8 undef>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 -1, i8 -1>
 ;
   %x = call <2 x i8> @llvm.sadd.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 20, i8 undef>)
   ret <2 x i8> %x
@@ -355,8 +335,7 @@ define <2 x i8> @test_sadd_vector_op1_un
 
 define <2 x i8> @test_usub_vector_op2_undef_mix1() {
 ; CHECK-LABEL: @test_usub_vector_op2_undef_mix1(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 20, i8 undef>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 0, i8 undef>
 ;
   %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 20, i8 undef>)
   ret <2 x i8> %x
@@ -364,8 +343,7 @@ define <2 x i8> @test_usub_vector_op2_un
 
 define <2 x i8> @test_usub_vector_op2_undef_mix2() {
 ; CHECK-LABEL: @test_usub_vector_op2_undef_mix2(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 undef, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %x = call <2 x i8> @llvm.usub.sat.v2i8(<2 x i8> <i8 10, i8 undef>, <2 x i8> <i8 undef, i8 20>)
   ret <2 x i8> %x
@@ -373,8 +351,7 @@ define <2 x i8> @test_usub_vector_op2_un
 
 define <2 x i8> @test_ssub_vector_op1_undef_mix1() {
 ; CHECK-LABEL: @test_ssub_vector_op1_undef_mix1(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 undef, i8 20>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> <i8 undef, i8 -10>
 ;
   %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 undef, i8 20>)
   ret <2 x i8> %x
@@ -382,8 +359,7 @@ define <2 x i8> @test_ssub_vector_op1_un
 
 define <2 x i8> @test_ssub_vector_op1_undef_mix2() {
 ; CHECK-LABEL: @test_ssub_vector_op1_undef_mix2(
-; CHECK-NEXT:    [[X:%.*]] = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 20, i8 undef>)
-; CHECK-NEXT:    ret <2 x i8> [[X]]
+; CHECK-NEXT:    ret <2 x i8> zeroinitializer
 ;
   %x = call <2 x i8> @llvm.ssub.sat.v2i8(<2 x i8> <i8 undef, i8 10>, <2 x i8> <i8 20, i8 undef>)
   ret <2 x i8> %x

Modified: llvm/trunk/test/Transforms/ConstProp/overflow-ops.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/ConstProp/overflow-ops.ll?rev=350971&r1=350970&r2=350971&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/ConstProp/overflow-ops.ll (original)
+++ llvm/trunk/test/Transforms/ConstProp/overflow-ops.ll Fri Jan 11 13:18:00 2019
@@ -31,8 +31,7 @@ define {i8, i1} @uadd_2() nounwind {
 
 define {i8, i1} @uadd_undef() nounwind {
 ; CHECK-LABEL: @uadd_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 -114, i8 undef)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } undef
 ;
   %t = call {i8, i1} @llvm.uadd.with.overflow.i8(i8 142, i8 undef)
   ret {i8, i1} %t
@@ -60,8 +59,7 @@ define {i8, i1} @usub_2() nounwind {
 
 define {i8, i1} @usub_undef() nounwind {
 ; CHECK-LABEL: @usub_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 4, i8 undef)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } undef
 ;
   %t = call {i8, i1} @llvm.usub.with.overflow.i8(i8 4, i8 undef)
   ret {i8, i1} %t
@@ -89,8 +87,7 @@ define {i8, i1} @umul_2() nounwind {
 
 define {i8, i1} @umul_undef() nounwind {
 ; CHECK-LABEL: @umul_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 2)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
 ;
   %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 2)
   ret {i8, i1} %t
@@ -98,8 +95,7 @@ define {i8, i1} @umul_undef() nounwind {
 
 define {i8, i1} @umul_both_undef() nounwind {
 ; CHECK-LABEL: @umul_both_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
 ;
   %t = call {i8, i1} @llvm.umul.with.overflow.i8(i8 undef, i8 undef)
   ret {i8, i1} %t
@@ -151,8 +147,7 @@ define {i8, i1} @sadd_5() nounwind {
 
 define {i8, i1} @sadd_undef() nounwind {
 ; CHECK-LABEL: @sadd_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.sadd.with.overflow.i8(i8 undef, i8 -10)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } undef
 ;
   %t = call {i8, i1} @llvm.sadd.with.overflow.i8(i8 undef, i8 -10)
   ret {i8, i1} %t
@@ -220,8 +215,7 @@ define {i8, i1} @ssub_5() nounwind {
 
 define {i8, i1} @ssub_undef() nounwind {
 ; CHECK-LABEL: @ssub_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.ssub.with.overflow.i8(i8 undef, i8 -10)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } undef
 ;
   %t = call {i8, i1} @llvm.ssub.with.overflow.i8(i8 undef, i8 -10)
   ret {i8, i1} %t
@@ -241,8 +235,7 @@ define {i8, i1} @smul_1() nounwind {
 
 define {i8, i1} @smul_undef() nounwind {
 ; CHECK-LABEL: @smul_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 -20, i8 undef)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
 ;
   %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 -20, i8 undef)
   ret {i8, i1} %t
@@ -250,8 +243,7 @@ define {i8, i1} @smul_undef() nounwind {
 
 define {i8, i1} @smul_both_undef() nounwind {
 ; CHECK-LABEL: @smul_both_undef(
-; CHECK-NEXT:    [[T:%.*]] = call { i8, i1 } @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
-; CHECK-NEXT:    ret { i8, i1 } [[T]]
+; CHECK-NEXT:    ret { i8, i1 } zeroinitializer
 ;
   %t = call {i8, i1} @llvm.smul.with.overflow.i8(i8 undef, i8 undef)
   ret {i8, i1} %t

Removed: llvm/trunk/test/Transforms/InstCombine/saturating-add-sub-vector.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/saturating-add-sub-vector.ll?rev=350970&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/saturating-add-sub-vector.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/saturating-add-sub-vector.ll (removed)
@@ -1,411 +0,0 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; RUN: opt < %s -instcombine -S | FileCheck %s
-
-define <16 x i8> @sadd_sat_v16i8_constant() {
-; CHECK-LABEL: @sadd_sat_v16i8_constant(
-; CHECK-NEXT:    ret <16 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-;
-  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sadd_sat_v16i8_constant_underflow() {
-; CHECK-LABEL: @sadd_sat_v16i8_constant_underflow(
-; CHECK-NEXT:    ret <16 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-;
-  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sadd_sat_v16i8_constant_overflow() {
-; CHECK-LABEL: @sadd_sat_v16i8_constant_overflow(
-; CHECK-NEXT:    ret <16 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127>
-;
-  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @sadd_sat_v16i8_constant_undefs() {
-; CHECK-LABEL: @sadd_sat_v16i8_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-; CHECK-NEXT:    ret <16 x i8> [[TMP1]]
-;
-  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <32 x i8> @sadd_sat_v32i8_constant() {
-; CHECK-LABEL: @sadd_sat_v32i8_constant(
-; CHECK-NEXT:    ret <32 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-;
-  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @sadd_sat_v32i8_constant_underflow() {
-; CHECK-LABEL: @sadd_sat_v32i8_constant_underflow(
-; CHECK-NEXT:    ret <32 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-;
-  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @sadd_sat_v32i8_constant_overflow() {
-; CHECK-LABEL: @sadd_sat_v32i8_constant_overflow(
-; CHECK-NEXT:    ret <32 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127>
-;
-  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @sadd_sat_v32i8_constant_undefs() {
-; CHECK-LABEL: @sadd_sat_v32i8_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-; CHECK-NEXT:    ret <32 x i8> [[TMP1]]
-;
-  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <64 x i8> @sadd_sat_v64i8_constant() {
-; CHECK-LABEL: @sadd_sat_v64i8_constant(
-; CHECK-NEXT:    ret <64 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-;
-  %1 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @sadd_sat_v64i8_constant_underflow() {
-; CHECK-LABEL: @sadd_sat_v64i8_constant_underflow(
-; CHECK-NEXT:    ret <64 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 -128, i8 18, i8 20, i8 22, i8 24, i8 26, i8 28, i8 30, i8 33>
-;
-  %1 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -107, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 -48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @sadd_sat_v64i8_constant_overflow() {
-; CHECK-LABEL: @sadd_sat_v64i8_constant_overflow(
-; CHECK-NEXT:    ret <64 x i8> <i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127, i8 2, i8 4, i8 6, i8 8, i8 10, i8 12, i8 14, i8 16, i8 18, i8 20, i8 127, i8 24, i8 26, i8 28, i8 30, i8 127>
-;
-  %1 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 125, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 120>)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @sadd_sat_v64i8_constant_undefs() {
-; CHECK-LABEL: @sadd_sat_v64i8_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-; CHECK-NEXT:    ret <64 x i8> [[TMP1]]
-;
-  %1 = call <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 undef, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16>, <64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <64 x i8> %1
-}
-
-define <8 x i16> @sadd_sat_v8i16_constant() {
-; CHECK-LABEL: @sadd_sat_v8i16_constant(
-; CHECK-NEXT:    ret <8 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16>
-;
-  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sadd_sat_v8i16_constant_underflow() {
-; CHECK-LABEL: @sadd_sat_v8i16_constant_underflow(
-; CHECK-NEXT:    ret <8 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768>
-;
-  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -32107>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -12188>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sadd_sat_v8i16_constant_overflow() {
-; CHECK-LABEL: @sadd_sat_v8i16_constant_overflow(
-; CHECK-NEXT:    ret <8 x i16> <i16 2, i16 32767, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16>
-;
-  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 1, i16 8248, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>, <8 x i16> <i16 1, i16 25192, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @sadd_sat_v8i16_constant_undefs() {
-; CHECK-LABEL: @sadd_sat_v8i16_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8>, <8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-; CHECK-NEXT:    ret <8 x i16> [[TMP1]]
-;
-  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8>, <8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <16 x i16> @sadd_sat_v16i16_constant() {
-; CHECK-LABEL: @sadd_sat_v16i16_constant(
-; CHECK-NEXT:    ret <16 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-;
-  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @sadd_sat_v16i16_constant_underflow() {
-; CHECK-LABEL: @sadd_sat_v16i16_constant_underflow(
-; CHECK-NEXT:    ret <16 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-;
-  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -21107, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -15188, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @sadd_sat_v16i16_constant_overflow() {
-; CHECK-LABEL: @sadd_sat_v16i16_constant_overflow(
-; CHECK-NEXT:    ret <16 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 32767, i16 24, i16 26, i16 28, i16 30, i16 32767>
-;
-  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20125, i16 12, i16 13, i16 14, i16 15, i16 20160>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20230, i16 12, i16 13, i16 14, i16 15, i16 20120>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @sadd_sat_v16i16_constant_undefs() {
-; CHECK-LABEL: @sadd_sat_v16i16_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-; CHECK-NEXT:    ret <16 x i16> [[TMP1]]
-;
-  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <32 x i16> @sadd_sat_v32i16_constant() {
-; CHECK-LABEL: @sadd_sat_v32i16_constant(
-; CHECK-NEXT:    ret <32 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-;
-  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @sadd_sat_v32i16_constant_underflow() {
-; CHECK-LABEL: @sadd_sat_v32i16_constant_underflow(
-; CHECK-NEXT:    ret <32 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 -32768, i16 18, i16 20, i16 22, i16 24, i16 26, i16 28, i16 30, i16 33>
-;
-  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20107, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20107, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20168, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 -20248, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @sadd_sat_v32i16_constant_overflow() {
-; CHECK-LABEL: @sadd_sat_v32i16_constant_overflow(
-; CHECK-NEXT:    ret <32 x i16> <i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 32767, i16 24, i16 26, i16 28, i16 30, i16 32767, i16 2, i16 4, i16 6, i16 8, i16 10, i16 12, i16 14, i16 16, i16 18, i16 20, i16 32767, i16 24, i16 26, i16 28, i16 30, i16 32767>
-;
-  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20125, i16 12, i16 13, i16 14, i16 15, i16 20200, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20125, i16 12, i16 13, i16 14, i16 15, i16 20200>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20200, i16 12, i16 13, i16 14, i16 15, i16 20120, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 20211, i16 12, i16 13, i16 14, i16 15, i16 20120>)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @sadd_sat_v32i16_constant_undefs() {
-; CHECK-LABEL: @sadd_sat_v32i16_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-; CHECK-NEXT:    ret <32 x i16> [[TMP1]]
-;
-  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 undef, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 16>, <32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <32 x i16> %1
-}
-
-define <16 x i8> @ssub_sat_v16i8_constant() {
-; CHECK-LABEL: @ssub_sat_v16i8_constant(
-; CHECK-NEXT:    ret <16 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-;
-  %1 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @ssub_sat_v16i8_constant_underflow() {
-; CHECK-LABEL: @ssub_sat_v16i8_constant_underflow(
-; CHECK-NEXT:    ret <16 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-;
-  %1 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @ssub_sat_v16i8_constant_overflow() {
-; CHECK-LABEL: @ssub_sat_v16i8_constant_overflow(
-; CHECK-NEXT:    ret <16 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127>
-;
-  %1 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16>, <16 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120>)
-  ret <16 x i8> %1
-}
-
-define <16 x i8> @ssub_sat_v16i8_constant_undefs() {
-; CHECK-LABEL: @ssub_sat_v16i8_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-; CHECK-NEXT:    ret <16 x i8> [[TMP1]]
-;
-  %1 = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <16 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <16 x i8> %1
-}
-
-define <32 x i8> @ssub_sat_v32i8_constant() {
-; CHECK-LABEL: @ssub_sat_v32i8_constant(
-; CHECK-NEXT:    ret <32 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-;
-  %1 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @ssub_sat_v32i8_constant_underflow() {
-; CHECK-LABEL: @ssub_sat_v32i8_constant_underflow(
-; CHECK-NEXT:    ret <32 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-;
-  %1 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @ssub_sat_v32i8_constant_overflow() {
-; CHECK-LABEL: @ssub_sat_v32i8_constant_overflow(
-; CHECK-NEXT:    ret <32 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127>
-;
-  %1 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16>, <32 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120>)
-  ret <32 x i8> %1
-}
-
-define <32 x i8> @ssub_sat_v32i8_constant_undefs() {
-; CHECK-LABEL: @ssub_sat_v32i8_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-; CHECK-NEXT:    ret <32 x i8> [[TMP1]]
-;
-  %1 = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <32 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <32 x i8> %1
-}
-
-define <64 x i8> @ssub_sat_v64i8_constant() {
-; CHECK-LABEL: @ssub_sat_v64i8_constant(
-; CHECK-NEXT:    ret <64 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-;
-  %1 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @ssub_sat_v64i8_constant_underflow() {
-; CHECK-LABEL: @ssub_sat_v64i8_constant_underflow(
-; CHECK-NEXT:    ret <64 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -128, i8 -18, i8 -20, i8 -22, i8 -24, i8 -26, i8 -28, i8 -30, i8 -33>
-;
-  %1 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -107, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 48, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @ssub_sat_v64i8_constant_overflow() {
-; CHECK-LABEL: @ssub_sat_v64i8_constant_overflow(
-; CHECK-NEXT:    ret <64 x i8> <i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127, i8 -2, i8 -4, i8 -6, i8 -8, i8 -10, i8 -12, i8 -14, i8 -16, i8 -18, i8 -20, i8 127, i8 -24, i8 -26, i8 -28, i8 -30, i8 127>
-;
-  %1 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> <i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16, i8 -1, i8 -2, i8 -3, i8 -4, i8 -5, i8 -6, i8 -7, i8 -8, i8 -9, i8 -10, i8 125, i8 -12, i8 -13, i8 -14, i8 -15, i8 16>, <64 x i8> <i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 -11, i8 12, i8 13, i8 14, i8 15, i8 -120>)
-  ret <64 x i8> %1
-}
-
-define <64 x i8> @ssub_sat_v64i8_constant_undefs() {
-; CHECK-LABEL: @ssub_sat_v64i8_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-; CHECK-NEXT:    ret <64 x i8> [[TMP1]]
-;
-  %1 = call <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8> <i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16, i8 undef, i8 -2, i8 -3, i8 -4, i8 -5, i8 undef, i8 -7, i8 -8, i8 -9, i8 -10, i8 -11, i8 -12, i8 -13, i8 -14, i8 -15, i8 -16>, <64 x i8> <i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17, i8 undef, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 17>)
-  ret <64 x i8> %1
-}
-
-define <8 x i16> @ssub_sat_v8i16_constant() {
-; CHECK-LABEL: @ssub_sat_v8i16_constant(
-; CHECK-NEXT:    ret <8 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16>
-;
-  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @ssub_sat_v8i16_constant_underflow() {
-; CHECK-LABEL: @ssub_sat_v8i16_constant_underflow(
-; CHECK-NEXT:    ret <8 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768>
-;
-  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -32107>, <8 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 12188>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @ssub_sat_v8i16_constant_overflow() {
-; CHECK-LABEL: @ssub_sat_v8i16_constant_overflow(
-; CHECK-NEXT:    ret <8 x i16> <i16 -2, i16 32767, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16>
-;
-  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 -1, i16 8248, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8>, <8 x i16> <i16 1, i16 -25192, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <8 x i16> @ssub_sat_v8i16_constant_undefs() {
-; CHECK-LABEL: @ssub_sat_v8i16_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8>, <8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-; CHECK-NEXT:    ret <8 x i16> [[TMP1]]
-;
-  %1 = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8>, <8 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>)
-  ret <8 x i16> %1
-}
-
-define <16 x i16> @ssub_sat_v16i16_constant() {
-; CHECK-LABEL: @ssub_sat_v16i16_constant(
-; CHECK-NEXT:    ret <16 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-;
-  %1 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @ssub_sat_v16i16_constant_underflow() {
-; CHECK-LABEL: @ssub_sat_v16i16_constant_underflow(
-; CHECK-NEXT:    ret <16 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-;
-  %1 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -21107, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 15188, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @ssub_sat_v16i16_constant_overflow() {
-; CHECK-LABEL: @ssub_sat_v16i16_constant_overflow(
-; CHECK-NEXT:    ret <16 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 32767, i16 -24, i16 -26, i16 -28, i16 -30, i16 32767>
-;
-  %1 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 20125, i16 -12, i16 -13, i16 -14, i16 -15, i16 20160>, <16 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 -20230, i16 12, i16 13, i16 14, i16 15, i16 -20120>)
-  ret <16 x i16> %1
-}
-
-define <16 x i16> @ssub_sat_v16i16_constant_undefs() {
-; CHECK-LABEL: @ssub_sat_v16i16_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-; CHECK-NEXT:    ret <16 x i16> [[TMP1]]
-;
-  %1 = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <16 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <16 x i16> %1
-}
-
-define <32 x i16> @ssub_sat_v32i16_constant() {
-; CHECK-LABEL: @ssub_sat_v32i16_constant(
-; CHECK-NEXT:    ret <32 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33, i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-;
-  %1 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @ssub_sat_v32i16_constant_underflow() {
-; CHECK-LABEL: @ssub_sat_v32i16_constant_underflow(
-; CHECK-NEXT:    ret <32 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33, i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -32768, i16 -18, i16 -20, i16 -22, i16 -24, i16 -26, i16 -28, i16 -30, i16 -33>
-;
-  %1 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -20107, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -20107, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 20168, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 20248, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @ssub_sat_v32i16_constant_overflow() {
-; CHECK-LABEL: @ssub_sat_v32i16_constant_overflow(
-; CHECK-NEXT:    ret <32 x i16> <i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 32767, i16 -24, i16 -26, i16 -28, i16 -30, i16 32767, i16 -2, i16 -4, i16 -6, i16 -8, i16 -10, i16 -12, i16 -14, i16 -16, i16 -18, i16 -20, i16 32767, i16 -24, i16 -26, i16 -28, i16 -30, i16 32767>
-;
-  %1 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> <i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 20125, i16 -12, i16 -13, i16 -14, i16 -15, i16 20200, i16 -1, i16 -2, i16 -3, i16 -4, i16 -5, i16 -6, i16 -7, i16 -8, i16 -9, i16 -10, i16 20125, i16 -12, i16 -13, i16 -14, i16 -15, i16 20200>, <32 x i16> <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 -20200, i16 12, i16 13, i16 14, i16 15, i16 -20120, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 -20211, i16 12, i16 13, i16 14, i16 15, i16 -20120>)
-  ret <32 x i16> %1
-}
-
-define <32 x i16> @ssub_sat_v32i16_constant_undefs() {
-; CHECK-LABEL: @ssub_sat_v32i16_constant_undefs(
-; CHECK-NEXT:    [[TMP1:%.*]] = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-; CHECK-NEXT:    ret <32 x i16> [[TMP1]]
-;
-  %1 = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> <i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16, i16 undef, i16 -2, i16 -3, i16 -4, i16 -5, i16 undef, i16 -7, i16 -8, i16 -9, i16 -10, i16 -11, i16 -12, i16 -13, i16 -14, i16 -15, i16 -16>, <32 x i16> <i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17, i16 undef, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 17>)
-  ret <32 x i16> %1
-}
-
-declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
-declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
-declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
-declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
-declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
-declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
-declare <64 x i8> @llvm.sadd.sat.v64i8(<64 x i8>, <64 x i8>) nounwind readnone
-declare <64 x i8> @llvm.ssub.sat.v64i8(<64 x i8>, <64 x i8>) nounwind readnone
-declare <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16>, <32 x i16>) nounwind readnone
-declare <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16>, <32 x i16>) nounwind readnone




More information about the llvm-commits mailing list