[clang] 808ac54 - [Fixed Point] Use FixedPointBuilder to codegen fixed-point IR.

Bevin Hansson via cfe-commits cfe-commits at lists.llvm.org
Mon Aug 24 05:38:09 PDT 2020


Author: Bevin Hansson
Date: 2020-08-24T14:37:07+02:00
New Revision: 808ac54645212ddc9aba150cdc97454e36fb9521

URL: https://github.com/llvm/llvm-project/commit/808ac54645212ddc9aba150cdc97454e36fb9521
DIFF: https://github.com/llvm/llvm-project/commit/808ac54645212ddc9aba150cdc97454e36fb9521.diff

LOG: [Fixed Point] Use FixedPointBuilder to codegen fixed-point IR.

This changes the methods in CGExprScalar to use
FixedPointBuilder to generate IR for fixed-point
conversions and operations.

Since FixedPointBuilder emits padded operations slightly
differently than the original code, some tests change.

Reviewed By: leonardchan

Differential Revision: https://reviews.llvm.org/D86282

Added: 
    

Modified: 
    clang/lib/CodeGen/CGExprScalar.cpp
    clang/test/Frontend/fixed_point_add.c
    clang/test/Frontend/fixed_point_div.c
    clang/test/Frontend/fixed_point_mul.c
    clang/test/Frontend/fixed_point_sub.c
    clang/test/Frontend/fixed_point_unary.c

Removed: 
    


################################################################################
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index aad4d2d2a674..b3857b3eeb06 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -32,6 +32,7 @@
 #include "llvm/IR/CFG.h"
 #include "llvm/IR/Constants.h"
 #include "llvm/IR/DataLayout.h"
+#include "llvm/IR/FixedPointBuilder.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/GetElementPtrTypeIterator.h"
 #include "llvm/IR/GlobalVariable.h"
@@ -356,11 +357,6 @@ class ScalarExprEmitter
   /// and an integer.
   Value *EmitFixedPointConversion(Value *Src, QualType SrcTy, QualType DstTy,
                                   SourceLocation Loc);
-  Value *EmitFixedPointConversion(Value *Src,
-                                  llvm::FixedPointSemantics &SrcFixedSema,
-                                  llvm::FixedPointSemantics &DstFixedSema,
-                                  SourceLocation Loc,
-                                  bool DstIsInteger = false);
 
   /// Emit a conversion from the specified complex type to the specified
   /// destination type, where the destination type is an LLVM scalar type.
@@ -1447,91 +1443,17 @@ Value *ScalarExprEmitter::EmitFixedPointConversion(Value *Src, QualType SrcTy,
                                                    SourceLocation Loc) {
   auto SrcFPSema = CGF.getContext().getFixedPointSemantics(SrcTy);
   auto DstFPSema = CGF.getContext().getFixedPointSemantics(DstTy);
-  return EmitFixedPointConversion(Src, SrcFPSema, DstFPSema, Loc,
-                                  DstTy->isIntegerType());
-}
-
-Value *ScalarExprEmitter::EmitFixedPointConversion(
-    Value *Src, llvm::FixedPointSemantics &SrcFPSema,
-    llvm::FixedPointSemantics &DstFPSema,
-    SourceLocation Loc, bool DstIsInteger) {
-  using llvm::APFixedPoint;
-  using llvm::APInt;
-  using llvm::ConstantInt;
-  using llvm::Value;
-
-  unsigned SrcWidth = SrcFPSema.getWidth();
-  unsigned DstWidth = DstFPSema.getWidth();
-  unsigned SrcScale = SrcFPSema.getScale();
-  unsigned DstScale = DstFPSema.getScale();
-  bool SrcIsSigned = SrcFPSema.isSigned();
-  bool DstIsSigned = DstFPSema.isSigned();
-
-  llvm::Type *DstIntTy = Builder.getIntNTy(DstWidth);
-
-  Value *Result = Src;
-  unsigned ResultWidth = SrcWidth;
-
-  // Downscale.
-  if (DstScale < SrcScale) {
-    // When converting to integers, we round towards zero. For negative numbers,
-    // right shifting rounds towards negative infinity. In this case, we can
-    // just round up before shifting.
-    if (DstIsInteger && SrcIsSigned) {
-      Value *Zero = llvm::Constant::getNullValue(Result->getType());
-      Value *IsNegative = Builder.CreateICmpSLT(Result, Zero);
-      Value *LowBits = ConstantInt::get(
-          CGF.getLLVMContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
-      Value *Rounded = Builder.CreateAdd(Result, LowBits);
-      Result = Builder.CreateSelect(IsNegative, Rounded, Result);
-    }
-
-    Result = SrcIsSigned
-                 ? Builder.CreateAShr(Result, SrcScale - DstScale, "downscale")
-                 : Builder.CreateLShr(Result, SrcScale - DstScale, "downscale");
-  }
-
-  if (!DstFPSema.isSaturated()) {
-    // Resize.
-    Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
-
-    // Upscale.
-    if (DstScale > SrcScale)
-      Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
-  } else {
-    // Adjust the number of fractional bits.
-    if (DstScale > SrcScale) {
-      // Compare to DstWidth to prevent resizing twice.
-      ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
-      llvm::Type *UpscaledTy = Builder.getIntNTy(ResultWidth);
-      Result = Builder.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
-      Result = Builder.CreateShl(Result, DstScale - SrcScale, "upscale");
-    }
-
-    // Handle saturation.
-    bool LessIntBits = DstFPSema.getIntegralBits() < SrcFPSema.getIntegralBits();
-    if (LessIntBits) {
-      Value *Max = ConstantInt::get(
-          CGF.getLLVMContext(),
-          APFixedPoint::getMax(DstFPSema).getValue().extOrTrunc(ResultWidth));
-      Value *TooHigh = SrcIsSigned ? Builder.CreateICmpSGT(Result, Max)
-                                   : Builder.CreateICmpUGT(Result, Max);
-      Result = Builder.CreateSelect(TooHigh, Max, Result, "satmax");
-    }
-    // Cannot overflow min to dest type if src is unsigned since all fixed
-    // point types can cover the unsigned min of 0.
-    if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
-      Value *Min = ConstantInt::get(
-          CGF.getLLVMContext(),
-          APFixedPoint::getMin(DstFPSema).getValue().extOrTrunc(ResultWidth));
-      Value *TooLow = Builder.CreateICmpSLT(Result, Min);
-      Result = Builder.CreateSelect(TooLow, Min, Result, "satmin");
-    }
-
-    // Resize the integer part to get the final destination size.
-    if (ResultWidth != DstWidth)
-      Result = Builder.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
-  }
+  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
+  llvm::Value *Result;
+  if (DstTy->isIntegerType())
+    Result = FPBuilder.CreateFixedToInteger(Src, SrcFPSema,
+                                            DstFPSema.getWidth(),
+                                            DstFPSema.isSigned());
+  else if (SrcTy->isIntegerType())
+    Result =  FPBuilder.CreateIntegerToFixed(Src, SrcFPSema.isSigned(),
+                                             DstFPSema);
+  else
+    Result = FPBuilder.CreateFixedToFixed(Src, SrcFPSema, DstFPSema);
   return Result;
 }
 
@@ -2668,12 +2590,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
     // Now, convert from our invented integer literal to the type of the unary
     // op. This will upscale and saturate if necessary. This value can become
     // undef in some cases.
-    auto SrcSema =
-        llvm::FixedPointSemantics::GetIntegerSemantics(
-            value->getType()->getScalarSizeInBits(), /*IsSigned=*/true);
+    llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
     auto DstSema = CGF.getContext().getFixedPointSemantics(Info.Ty);
-    Info.RHS = EmitFixedPointConversion(Info.RHS, SrcSema, DstSema,
-                                        E->getExprLoc());
+    Info.RHS = FPBuilder.CreateIntegerToFixed(Info.RHS, true, DstSema);
     value = EmitFixedPointBinOp(Info);
 
   // Objective-C pointer types.
@@ -3596,84 +3515,41 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
   auto ResultFixedSema = Ctx.getFixedPointSemantics(ResultTy);
   auto CommonFixedSema = LHSFixedSema.getCommonSemantics(RHSFixedSema);
 
-  // Convert the operands to the full precision type.
-  Value *FullLHS = EmitFixedPointConversion(LHS, LHSFixedSema, CommonFixedSema,
-                                            op.E->getExprLoc());
-  Value *FullRHS = EmitFixedPointConversion(RHS, RHSFixedSema, CommonFixedSema,
-                                            op.E->getExprLoc());
-
   // Perform the actual operation.
   Value *Result;
+  llvm::FixedPointBuilder<CGBuilderTy> FPBuilder(Builder);
   switch (op.Opcode) {
   case BO_AddAssign:
-  case BO_Add: {
-    if (CommonFixedSema.isSaturated()) {
-      llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
-                                    ? llvm::Intrinsic::sadd_sat
-                                    : llvm::Intrinsic::uadd_sat;
-      Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
-    } else {
-      Result = Builder.CreateAdd(FullLHS, FullRHS);
-    }
+  case BO_Add:
+    Result = FPBuilder.CreateAdd(LHS, LHSFixedSema, RHS, RHSFixedSema);
     break;
-  }
   case BO_SubAssign:
-  case BO_Sub: {
-    if (CommonFixedSema.isSaturated()) {
-      llvm::Intrinsic::ID IID = CommonFixedSema.isSigned()
-                                    ? llvm::Intrinsic::ssub_sat
-                                    : llvm::Intrinsic::usub_sat;
-      Result = Builder.CreateBinaryIntrinsic(IID, FullLHS, FullRHS);
-    } else {
-      Result = Builder.CreateSub(FullLHS, FullRHS);
-    }
+  case BO_Sub:
+    Result = FPBuilder.CreateSub(LHS, LHSFixedSema, RHS, RHSFixedSema);
     break;
-  }
   case BO_MulAssign:
-  case BO_Mul: {
-    llvm::Intrinsic::ID IID;
-    if (CommonFixedSema.isSaturated())
-      IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix_sat
-                                       : llvm::Intrinsic::umul_fix_sat;
-    else
-      IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::smul_fix
-                                       : llvm::Intrinsic::umul_fix;
-    Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
-        {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
+  case BO_Mul:
+    Result = FPBuilder.CreateMul(LHS, LHSFixedSema, RHS, RHSFixedSema);
     break;
-  }
   case BO_DivAssign:
-  case BO_Div: {
-    llvm::Intrinsic::ID IID;
-    if (CommonFixedSema.isSaturated())
-      IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix_sat
-                                       : llvm::Intrinsic::udiv_fix_sat;
-    else
-      IID = CommonFixedSema.isSigned() ? llvm::Intrinsic::sdiv_fix
-                                       : llvm::Intrinsic::udiv_fix;
-    Result = Builder.CreateIntrinsic(IID, {FullLHS->getType()},
-        {FullLHS, FullRHS, Builder.getInt32(CommonFixedSema.getScale())});
-    break;    
-  }
+  case BO_Div:
+    Result = FPBuilder.CreateDiv(LHS, LHSFixedSema, RHS, RHSFixedSema);
+    break;
   case BO_LT:
-    return CommonFixedSema.isSigned() ? Builder.CreateICmpSLT(FullLHS, FullRHS)
-                                      : Builder.CreateICmpULT(FullLHS, FullRHS);
+    return FPBuilder.CreateLT(LHS, LHSFixedSema, RHS, RHSFixedSema);
   case BO_GT:
-    return CommonFixedSema.isSigned() ? Builder.CreateICmpSGT(FullLHS, FullRHS)
-                                      : Builder.CreateICmpUGT(FullLHS, FullRHS);
+    return FPBuilder.CreateGT(LHS, LHSFixedSema, RHS, RHSFixedSema);
   case BO_LE:
-    return CommonFixedSema.isSigned() ? Builder.CreateICmpSLE(FullLHS, FullRHS)
-                                      : Builder.CreateICmpULE(FullLHS, FullRHS);
+    return FPBuilder.CreateLE(LHS, LHSFixedSema, RHS, RHSFixedSema);
   case BO_GE:
-    return CommonFixedSema.isSigned() ? Builder.CreateICmpSGE(FullLHS, FullRHS)
-                                      : Builder.CreateICmpUGE(FullLHS, FullRHS);
+    return FPBuilder.CreateGE(LHS, LHSFixedSema, RHS, RHSFixedSema);
   case BO_EQ:
     // For equality operations, we assume any padding bits on unsigned types are
     // zero'd out. They could be overwritten through non-saturating operations
     // that cause overflow, but this leads to undefined behavior.
-    return Builder.CreateICmpEQ(FullLHS, FullRHS);
+    return FPBuilder.CreateEQ(LHS, LHSFixedSema, RHS, RHSFixedSema);
   case BO_NE:
-    return Builder.CreateICmpNE(FullLHS, FullRHS);
+    return FPBuilder.CreateNE(LHS, LHSFixedSema, RHS, RHSFixedSema);
   case BO_Shl:
   case BO_Shr:
   case BO_Cmp:
@@ -3698,8 +3574,7 @@ Value *ScalarExprEmitter::EmitFixedPointBinOp(const BinOpInfo &op) {
   }
 
   // Convert to the result type.
-  return EmitFixedPointConversion(Result, CommonFixedSema, ResultFixedSema,
-                                  op.E->getExprLoc());
+  return FPBuilder.CreateFixedToFixed(Result, CommonFixedSema, ResultFixedSema);
 }
 
 Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {

diff --git a/clang/test/Frontend/fixed_point_add.c b/clang/test/Frontend/fixed_point_add.c
index 15132cfb712a..d01989c5eab0 100644
--- a/clang/test/Frontend/fixed_point_add.c
+++ b/clang/test/Frontend/fixed_point_add.c
@@ -444,11 +444,10 @@ void sat_sassasas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.uadd.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]])
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @usa_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_usasusausas() {
@@ -469,11 +468,11 @@ void sat_usasusausas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ua, align 4
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[TMP0]] to i31
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i31
-// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i31 [[RESIZE1]], 8
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i31 @llvm.uadd.sat.i31(i31 [[RESIZE]], i31 [[UPSCALE]])
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[TMP2]] to i32
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
+// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]])
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i32 [[TMP2]] to i31
+// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
 // UNSIGNED-NEXT:    store i32 [[RESIZE2]], i32* @ua_sat, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -533,11 +532,10 @@ void sat_sassasui() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.uadd.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]])
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @uf_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_ufsufsufs() {

diff --git a/clang/test/Frontend/fixed_point_div.c b/clang/test/Frontend/fixed_point_div.c
index b77a13cafb3f..d54ba3bf48b0 100644
--- a/clang/test/Frontend/fixed_point_div.c
+++ b/clang/test/Frontend/fixed_point_div.c
@@ -252,7 +252,7 @@ void sdiv_aaaaa() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.udiv.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.sdiv.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
 // UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -276,7 +276,7 @@ void udiv_usausausa() {
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ua, align 4
 // UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
 // UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.udiv.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sdiv.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
 // UNSIGNED-NEXT:    store i32 [[TMP2]], i32* @ua, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -298,7 +298,7 @@ void udiv_uausaua() {
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i8, i8* @usf, align 1
 // UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.udiv.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.sdiv.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
 // UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -326,7 +326,7 @@ void udiv_usausausf() {
 // UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
 // UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
 // UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i24 @llvm.udiv.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i24 @llvm.sdiv.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
 // UNSIGNED-NEXT:    [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
 // UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
@@ -544,11 +544,10 @@ void sat_sassasas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.udiv.fix.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]], i32 7)
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.sdiv.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @usa_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_usasusausas() {
@@ -569,11 +568,11 @@ void sat_usasusausas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ua, align 4
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[TMP0]] to i31
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i31
-// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i31 [[RESIZE1]], 8
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i31 @llvm.udiv.fix.sat.i31(i31 [[RESIZE]], i31 [[UPSCALE]], i32 15)
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[TMP2]] to i32
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
+// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.sdiv.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i32 [[TMP2]] to i31
+// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
 // UNSIGNED-NEXT:    store i32 [[RESIZE2]], i32* @ua_sat, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -633,11 +632,10 @@ void sat_sassasui() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.udiv.fix.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]], i32 15)
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @uf_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.sdiv.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 15)
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_ufsufsufs() {

diff --git a/clang/test/Frontend/fixed_point_mul.c b/clang/test/Frontend/fixed_point_mul.c
index 777c35c52d4a..eeb80dd08d94 100644
--- a/clang/test/Frontend/fixed_point_mul.c
+++ b/clang/test/Frontend/fixed_point_mul.c
@@ -252,7 +252,7 @@ void smul_aaaaa() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa, align 2
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
 // UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -276,7 +276,7 @@ void umul_usausausa() {
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i32, i32* @ua, align 4
 // UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i32
 // UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.umul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.i32(i32 [[UPSCALE]], i32 [[TMP1]], i32 15)
 // UNSIGNED-NEXT:    store i32 [[TMP2]], i32* @ua, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -298,7 +298,7 @@ void umul_uausaua() {
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i8, i8* @usf, align 1
 // UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i8 [[TMP1]] to i16
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.umul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.i16(i16 [[TMP0]], i16 [[RESIZE]], i32 7)
 // UNSIGNED-NEXT:    store i16 [[TMP2]], i16* @usa, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -326,7 +326,7 @@ void umul_usausausf() {
 // UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP0]] to i24
 // UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i24 [[RESIZE]], 8
 // UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i24
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i24 @llvm.umul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i24 @llvm.smul.fix.i24(i24 [[UPSCALE]], i24 [[RESIZE1]], i32 15)
 // UNSIGNED-NEXT:    [[DOWNSCALE:%.*]] = lshr i24 [[TMP2]], 8
 // UNSIGNED-NEXT:    [[RESIZE2:%.*]] = trunc i24 [[DOWNSCALE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa, align 2
@@ -544,11 +544,10 @@ void sat_sassasas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.umul.fix.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]], i32 7)
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 7)
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @usa_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_usasusausas() {
@@ -569,11 +568,11 @@ void sat_usasusausas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ua, align 4
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[TMP0]] to i31
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i31
-// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i31 [[RESIZE1]], 8
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i31 @llvm.umul.fix.sat.i31(i31 [[RESIZE]], i31 [[UPSCALE]], i32 15)
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[TMP2]] to i32
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
+// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.smul.fix.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]], i32 15)
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i32 [[TMP2]] to i31
+// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
 // UNSIGNED-NEXT:    store i32 [[RESIZE2]], i32* @ua_sat, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -633,11 +632,10 @@ void sat_sassasui() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.umul.fix.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]], i32 15)
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @uf_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.smul.fix.sat.i16(i16 [[TMP0]], i16 [[TMP1]], i32 15)
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP2]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_ufsufsufs() {

diff --git a/clang/test/Frontend/fixed_point_sub.c b/clang/test/Frontend/fixed_point_sub.c
index 4d07b4a52257..6446d76fbaa5 100644
--- a/clang/test/Frontend/fixed_point_sub.c
+++ b/clang/test/Frontend/fixed_point_sub.c
@@ -444,11 +444,12 @@ void sat_sassasas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @usa, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.usub.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]])
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @usa_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.ssub.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
+// UNSIGNED-NEXT:    [[TMP3:%.*]] = icmp slt i16 [[TMP2]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP3]], i16 0, i16 [[TMP2]]
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[SATMIN]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @usa_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_usasusausas() {
@@ -469,11 +470,13 @@ void sat_usasusausas() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @ua, align 4
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @usa_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[TMP0]] to i31
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i16 [[TMP1]] to i31
-// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i31 [[RESIZE1]], 8
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i31 @llvm.usub.sat.i31(i31 [[RESIZE]], i31 [[UPSCALE]])
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[TMP2]] to i32
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = zext i16 [[TMP1]] to i32
+// UNSIGNED-NEXT:    [[UPSCALE:%.*]] = shl i32 [[RESIZE]], 8
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[TMP0]], i32 [[UPSCALE]])
+// UNSIGNED-NEXT:    [[TMP3:%.*]] = icmp slt i32 [[TMP2]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP3]], i32 0, i32 [[TMP2]]
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i32 [[SATMIN]] to i31
+// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i31 [[RESIZE1]] to i32
 // UNSIGNED-NEXT:    store i32 [[RESIZE2]], i32* @ua_sat, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -533,11 +536,12 @@ void sat_sassasui() {
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    [[TMP1:%.*]] = load i16, i16* @uf_sat, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = trunc i16 [[TMP1]] to i15
-// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i15 @llvm.usub.sat.i15(i15 [[RESIZE]], i15 [[RESIZE1]])
-// UNSIGNED-NEXT:    [[RESIZE2:%.*]] = zext i15 [[TMP2]] to i16
-// UNSIGNED-NEXT:    store i16 [[RESIZE2]], i16* @uf_sat, align 2
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = call i16 @llvm.ssub.sat.i16(i16 [[TMP0]], i16 [[TMP1]])
+// UNSIGNED-NEXT:    [[TMP3:%.*]] = icmp slt i16 [[TMP2]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP3]], i16 0, i16 [[TMP2]]
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[SATMIN]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
+// UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @uf_sat, align 2
 // UNSIGNED-NEXT:    ret void
 //
 void sat_ufsufsufs() {

diff --git a/clang/test/Frontend/fixed_point_unary.c b/clang/test/Frontend/fixed_point_unary.c
index 84c6654fe7af..849e38a94bc4 100644
--- a/clang/test/Frontend/fixed_point_unary.c
+++ b/clang/test/Frontend/fixed_point_unary.c
@@ -148,9 +148,9 @@ void inc_slf() {
 // UNSIGNED-LABEL: @inc_sua(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @sua, align 4
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[TMP0]] to i31
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i31 @llvm.uadd.sat.i31(i31 [[RESIZE]], i31 32768)
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i31 [[TMP1]] to i32
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i32 @llvm.sadd.sat.i32(i32 [[TMP0]], i32 32768)
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[TMP1]] to i31
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i31 [[RESIZE]] to i32
 // UNSIGNED-NEXT:    store i32 [[RESIZE1]], i32* @sua, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -168,9 +168,9 @@ void inc_sua() {
 // UNSIGNED-LABEL: @inc_susa(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @susa, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i15 @llvm.uadd.sat.i15(i15 [[RESIZE]], i15 128)
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[TMP1]] to i16
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP0]], i16 128)
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP1]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @susa, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -188,9 +188,9 @@ void inc_susa() {
 // UNSIGNED-LABEL: @inc_suf(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @suf, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i15 @llvm.uadd.sat.i15(i15 [[RESIZE]], i15 -1)
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[TMP1]] to i16
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i16 @llvm.sadd.sat.i16(i16 [[TMP0]], i16 32767)
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP1]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @suf, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -329,9 +329,11 @@ void dec_slf() {
 // UNSIGNED-LABEL: @dec_sua(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i32, i32* @sua, align 4
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[TMP0]] to i31
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i31 @llvm.usub.sat.i31(i31 [[RESIZE]], i31 32768)
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i31 [[TMP1]] to i32
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i32 @llvm.ssub.sat.i32(i32 [[TMP0]], i32 32768)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = icmp slt i32 [[TMP1]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP2]], i32 0, i32 [[TMP1]]
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i32 [[SATMIN]] to i31
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i31 [[RESIZE]] to i32
 // UNSIGNED-NEXT:    store i32 [[RESIZE1]], i32* @sua, align 4
 // UNSIGNED-NEXT:    ret void
 //
@@ -349,9 +351,11 @@ void dec_sua() {
 // UNSIGNED-LABEL: @dec_susa(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @susa, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i15 @llvm.usub.sat.i15(i15 [[RESIZE]], i15 128)
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[TMP1]] to i16
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ssub.sat.i16(i16 [[TMP0]], i16 128)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP2]], i16 0, i16 [[TMP1]]
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[SATMIN]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @susa, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -369,9 +373,11 @@ void dec_susa() {
 // UNSIGNED-LABEL: @dec_suf(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @suf, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i15 @llvm.usub.sat.i15(i15 [[RESIZE]], i15 -1)
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[TMP1]] to i16
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ssub.sat.i16(i16 [[TMP0]], i16 32767)
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP2]], i16 0, i16 [[TMP1]]
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[SATMIN]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @suf, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -456,9 +462,11 @@ void neg_sf() {
 // UNSIGNED-LABEL: @neg_susa(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @susa, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i15 @llvm.usub.sat.i15(i15 0, i15 [[RESIZE]])
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[TMP1]] to i16
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ssub.sat.i16(i16 0, i16 [[TMP0]])
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP2]], i16 0, i16 [[TMP1]]
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[SATMIN]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @susa, align 2
 // UNSIGNED-NEXT:    ret void
 //
@@ -476,9 +484,11 @@ void neg_susa() {
 // UNSIGNED-LABEL: @neg_suf(
 // UNSIGNED-NEXT:  entry:
 // UNSIGNED-NEXT:    [[TMP0:%.*]] = load i16, i16* @suf, align 2
-// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[TMP0]] to i15
-// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i15 @llvm.usub.sat.i15(i15 0, i15 [[RESIZE]])
-// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[TMP1]] to i16
+// UNSIGNED-NEXT:    [[TMP1:%.*]] = call i16 @llvm.ssub.sat.i16(i16 0, i16 [[TMP0]])
+// UNSIGNED-NEXT:    [[TMP2:%.*]] = icmp slt i16 [[TMP1]], 0
+// UNSIGNED-NEXT:    [[SATMIN:%.*]] = select i1 [[TMP2]], i16 0, i16 [[TMP1]]
+// UNSIGNED-NEXT:    [[RESIZE:%.*]] = trunc i16 [[SATMIN]] to i15
+// UNSIGNED-NEXT:    [[RESIZE1:%.*]] = zext i15 [[RESIZE]] to i16
 // UNSIGNED-NEXT:    store i16 [[RESIZE1]], i16* @suf, align 2
 // UNSIGNED-NEXT:    ret void
 //


        


More information about the cfe-commits mailing list