[llvm] goldsteinn/fp binop folds (PR #82555)

via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 21 16:17:04 PST 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-llvm-transforms

Author: None (goldsteinn)

<details>
<summary>Changes</summary>

- **[InstCombine] Add tests for folding `(fp_binop ({s|u}itofp x)`**
- **[InstCombine] Move folding `(add (sitofp x)`**
- **[InstCombine] Add folds for `(fp_binop ({s|u}itofp x)`**


---

Patch is 49.29 KiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/82555.diff


7 Files Affected:

- (modified) llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp (+6-57) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineInternal.h (+1) 
- (modified) llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp (+3) 
- (modified) llvm/lib/Transforms/InstCombine/InstructionCombining.cpp (+165) 
- (modified) llvm/test/Transforms/InstCombine/add-sitofp.ll (+10-10) 
- (added) llvm/test/Transforms/InstCombine/binop-itofp.ll (+998) 
- (modified) llvm/test/Transforms/InstCombine/pr33453.ll (+1-3) 
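
To make the new fold concrete, here is a minimal before/after sketch adapted from the `test_ui_ui_i8_add` case in the added `binop-itofp.ll` test (the `@src`/`@tgt` names are illustrative, not from the patch). Both operands are masked so they provably fit in `half`'s 11-bit significand, so the `fadd` can instead be performed as an integer add followed by a single cast:

```llvm
; Before: two exact uitofp casts feeding an fadd.
define half @src(i8 %x_in, i8 %y_in) {
  %x = and i8 %x_in, 127        ; <= 127, exactly representable in half
  %y = and i8 %y_in, 127
  %xf = uitofp i8 %x to half
  %yf = uitofp i8 %y to half
  %r = fadd half %xf, %yf
  ret half %r
}

; After: a single integer add (known not to wrap: 127 + 127 <= 255) and one cast.
define half @tgt(i8 %x_in, i8 %y_in) {
  %x = and i8 %x_in, 127
  %y = and i8 %y_in, 127
  %add = add nuw i8 %x, %y
  %r = uitofp i8 %add to half
  ret half %r
}
```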


``````````diff
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
index cfff5df9ff5005..f0ca83bb225f1f 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAddSub.cpp
@@ -1867,64 +1867,10 @@ Instruction *InstCombinerImpl::visitFAdd(BinaryOperator &I) {
 
   // Check for (fadd double (sitofp x), y), see if we can merge this into an
   // integer add followed by a promotion.
-  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
-  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
-    Value *LHSIntVal = LHSConv->getOperand(0);
-    Type *FPType = LHSConv->getType();
-
-    // TODO: This check is overly conservative. In many cases known bits
-    // analysis can tell us that the result of the addition has less significant
-    // bits than the integer type can hold.
-    auto IsValidPromotion = [](Type *FTy, Type *ITy) {
-      Type *FScalarTy = FTy->getScalarType();
-      Type *IScalarTy = ITy->getScalarType();
-
-      // Do we have enough bits in the significand to represent the result of
-      // the integer addition?
-      unsigned MaxRepresentableBits =
-          APFloat::semanticsPrecision(FScalarTy->getFltSemantics());
-      return IScalarTy->getIntegerBitWidth() <= MaxRepresentableBits;
-    };
-
-    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
-    // ... if the constant fits in the integer value.  This is useful for things
-    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
-    // requires a constant pool load, and generally allows the add to be better
-    // instcombined.
-    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS))
-      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
-        Constant *CI = ConstantFoldCastOperand(Instruction::FPToSI, CFP,
-                                               LHSIntVal->getType(), DL);
-        if (LHSConv->hasOneUse() &&
-            ConstantFoldCastOperand(Instruction::SIToFP, CI, I.getType(), DL) ==
-                CFP &&
-            willNotOverflowSignedAdd(LHSIntVal, CI, I)) {
-          // Insert the new integer add.
-          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, CI, "addconv");
-          return new SIToFPInst(NewAdd, I.getType());
-        }
-      }
-
-    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
-    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
-      Value *RHSIntVal = RHSConv->getOperand(0);
-      // It's enough to check LHS types only because we require int types to
-      // be the same for this transform.
-      if (IsValidPromotion(FPType, LHSIntVal->getType())) {
-        // Only do this if x/y have the same type, if at least one of them has a
-        // single use (so we don't increase the number of int->fp conversions),
-        // and if the integer add will not overflow.
-        if (LHSIntVal->getType() == RHSIntVal->getType() &&
-            (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
-            willNotOverflowSignedAdd(LHSIntVal, RHSIntVal, I)) {
-          // Insert the new integer add.
-          Value *NewAdd = Builder.CreateNSWAdd(LHSIntVal, RHSIntVal, "addconv");
-          return new SIToFPInst(NewAdd, I.getType());
-        }
-      }
-    }
-  }
+  if (Instruction *R = foldFBinOpOfIntCasts(I))
+    return R;
 
+  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
   // Handle specials cases for FAdd with selects feeding the operation
   if (Value *V = SimplifySelectsFeedingBinaryOp(I, LHS, RHS))
     return replaceInstUsesWith(I, V);
@@ -2832,6 +2778,9 @@ Instruction *InstCombinerImpl::visitFSub(BinaryOperator &I) {
   if (Instruction *X = foldFNegIntoConstant(I, DL))
     return X;
 
+  if (Instruction *R = foldFBinOpOfIntCasts(I))
+    return R;
+
   Value *X, *Y;
   Constant *C;
 
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
index 0b4283bc37650a..57148d719d9b61 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
+++ b/llvm/lib/Transforms/InstCombine/InstCombineInternal.h
@@ -379,6 +379,7 @@ class LLVM_LIBRARY_VISIBILITY InstCombinerImpl final
   Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
   Instruction *foldBitcastExtElt(ExtractElementInst &ExtElt);
   Instruction *foldCastedBitwiseLogic(BinaryOperator &I);
+  Instruction *foldFBinOpOfIntCasts(BinaryOperator &I);
   Instruction *foldBinopOfSextBoolToSelect(BinaryOperator &I);
   Instruction *narrowBinOp(TruncInst &Trunc);
   Instruction *narrowMaskedBinOp(BinaryOperator &And);
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index 0bd4b6d1a835af..3ebf6b3d9bf7f7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -769,6 +769,9 @@ Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
   if (Instruction *R = foldFPSignBitOps(I))
     return R;
 
+  if (Instruction *R = foldFBinOpOfIntCasts(I))
+    return R;
+
   // X * -1.0 --> -X
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
   if (match(Op1, m_SpecificFP(-1.0)))
diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index 4af455c37c788c..e5bbf59151bf9a 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -1401,6 +1401,171 @@ Value *InstCombinerImpl::dyn_castNegVal(Value *V) const {
   return nullptr;
 }
 
+// Try to fold:
+//    1) (fp_binop ({s|u}itofp x), ({s|u}itofp y))
+//        -> ({s|u}itofp (int_binop x, y))
+//    2) (fp_binop ({s|u}itofp x), FpC)
+//        -> ({s|u}itofp (int_binop x, (fpto{s|u}i FpC)))
+Instruction *InstCombinerImpl::foldFBinOpOfIntCasts(BinaryOperator &BO) {
+  Value *IntOps[2];
+  Constant *Op1FpC = nullptr;
+
+  // Check for:
+  //    1) (binop ({s|u}itofp x), ({s|u}itofp y))
+  //    2) (binop ({s|u}itofp x), FpC)
+  if (!match(BO.getOperand(0), m_SIToFP(m_Value(IntOps[0]))) &&
+      !match(BO.getOperand(0), m_UIToFP(m_Value(IntOps[0]))))
+    return nullptr;
+
+  if (!match(BO.getOperand(1), m_Constant(Op1FpC)) &&
+      !match(BO.getOperand(1), m_SIToFP(m_Value(IntOps[1]))) &&
+      !match(BO.getOperand(1), m_UIToFP(m_Value(IntOps[1]))))
+    return nullptr;
+
+  Type *FPTy = BO.getType();
+  Type *IntTy = IntOps[0]->getType();
+
+  // Do we have signed casts?
+  bool OpsFromSigned = isa<SIToFPInst>(BO.getOperand(0));
+
+  unsigned IntSz = IntTy->getScalarSizeInBits();
+  // This is the maximum number of in-use bits in the integer for which the
+  // int -> fp casts are exact.
+  unsigned MaxRepresentableBits =
+      APFloat::semanticsPrecision(FPTy->getScalarType()->getFltSemantics());
+
+  // Cache KnownBits to potentially save some analysis.
+  std::optional<KnownBits> OpsKnown[2];
+
+  // Preserve the known number of leading bits. This can allow us to trivially
+  // prove the nsw/nuw checks later on.
+  unsigned NumUsedLeadingBits[2] = {IntSz, IntSz};
+
+  auto IsNonZero = [&](unsigned OpNo) -> bool {
+    if (OpsKnown[OpNo].has_value() && OpsKnown[OpNo]->isNonZero())
+      return true;
+    return isKnownNonZero(IntOps[OpNo], SQ.DL);
+  };
+
+  auto IsNonNeg = [&](unsigned OpNo) -> bool {
+    if (OpsKnown[OpNo].has_value() && OpsKnown[OpNo]->isNonNegative())
+      return true;
+    return isKnownNonNegative(IntOps[OpNo], SQ);
+  };
+
+  // Check if we know for certain that ({s|u}itofp op) is exact.
+  auto IsValidPromotion = [&](unsigned OpNo) -> bool {
+    // If fp precision >= bitwidth(op) then it's exact.
+    if (MaxRepresentableBits >= IntSz)
+      ;
+    // Otherwise, if it's a signed cast, check that fp precision >=
+    // bitwidth(op) - numSignBits(op).
+    else if (OpsFromSigned)
+      NumUsedLeadingBits[OpNo] = IntSz - ComputeNumSignBits(IntOps[OpNo]);
+    // Finally, for unsigned, check that fp precision >= bitwidth(op) -
+    // numLeadingZeros(op).
+    else {
+      if (!OpsKnown[OpNo].has_value())
+        OpsKnown[OpNo] = computeKnownBits(IntOps[OpNo], /*Depth*/ 0, &BO);
+      NumUsedLeadingBits[OpNo] = IntSz - OpsKnown[OpNo]->countMinLeadingZeros();
+    }
+    // NB: We could also check if op is known to be a power of 2 or zero (which
+    // will always be representable). It's unlikely, however, that if we are
+    // unable to bound op in any way, we will be able to pass the overflow
+    // checks later on.
+
+    if (MaxRepresentableBits < NumUsedLeadingBits[OpNo])
+      return false;
+    // Signed + Mul also requires that op is non-zero to avoid -0 cases.
+    return (OpsFromSigned && BO.getOpcode() == Instruction::FMul)
+               ? IsNonZero(OpNo)
+               : true;
+  };
+
+  // If we have a constant rhs, see if we can losslessly convert it to an int.
+  if (Op1FpC != nullptr) {
+    Constant *Op1IntC = ConstantFoldCastOperand(
+        OpsFromSigned ? Instruction::FPToSI : Instruction::FPToUI, Op1FpC,
+        IntTy, DL);
+    if (Op1IntC == nullptr)
+      return nullptr;
+    if (ConstantFoldCastOperand(OpsFromSigned ? Instruction::SIToFP
+                                              : Instruction::UIToFP,
+                                Op1IntC, FPTy, DL) != Op1FpC)
+      return nullptr;
+
+    // First try to keep sign of cast the same.
+    IntOps[1] = Op1IntC;
+  }
+
+  // Ensure lhs/rhs integer types match.
+  if (IntTy != IntOps[1]->getType())
+    return nullptr;
+
+  if (Op1FpC == nullptr) {
+    if (OpsFromSigned != isa<SIToFPInst>(BO.getOperand(1))) {
+      // If we have a signed + unsigned, see if we can treat both as signed
+      // (uitofp nneg x) == (sitofp nneg x).
+      if (OpsFromSigned ? !IsNonNeg(1) : !IsNonNeg(0))
+        return nullptr;
+      OpsFromSigned = true;
+    }
+    if (!IsValidPromotion(1))
+      return nullptr;
+  }
+  if (!IsValidPromotion(0))
+    return nullptr;
+
+  // Finally, check that the integer version of the binop will not overflow.
+  BinaryOperator::BinaryOps IntOpc;
+  // Because of the precision check, we can often rule out overflows.
+  bool NeedsOverflowCheck = true;
+  // Try to conservatively rule out overflow based on the precision checks
+  // already done.
+  unsigned OverflowMaxOutputBits = OpsFromSigned ? 2 : 1;
+  unsigned OverflowMaxCurBits =
+      std::max(NumUsedLeadingBits[0], NumUsedLeadingBits[1]);
+  bool OutputSigned = OpsFromSigned;
+  switch (BO.getOpcode()) {
+  case Instruction::FAdd:
+    IntOpc = Instruction::Add;
+    OverflowMaxOutputBits += OverflowMaxCurBits;
+    break;
+  case Instruction::FSub:
+    IntOpc = Instruction::Sub;
+    OverflowMaxOutputBits += OverflowMaxCurBits;
+    break;
+  case Instruction::FMul:
+    IntOpc = Instruction::Mul;
+    OverflowMaxOutputBits += OverflowMaxCurBits * 2;
+    break;
+  default:
+    llvm_unreachable("Unsupported binop");
+  }
+  // The precision check may have already ruled out overflow.
+  if (OverflowMaxOutputBits < IntSz) {
+    NeedsOverflowCheck = false;
+    // A sub of bounded unsigned values may go negative, but the result still
+    // fits in the signed range (this is what allows us to avoid the overflow
+    // check for sub).
+    if (IntOpc == Instruction::Sub)
+      OutputSigned = true;
+  }
+
+  // The precision check did not rule out overflow, so we need to check.
+  if (NeedsOverflowCheck &&
+      !willNotOverflow(IntOpc, IntOps[0], IntOps[1], BO, OutputSigned))
+    return nullptr;
+
+  Value *IntBinOp = Builder.CreateBinOp(IntOpc, IntOps[0], IntOps[1]);
+  if (OutputSigned)
+    return new SIToFPInst(IntBinOp, FPTy);
+  return new UIToFPInst(IntBinOp, FPTy);
+}
+
 /// A binop with a constant operand and a sign-extended boolean operand may be
 /// converted into a select of constants by applying the binary operation to
 /// the constant with the two possible values of the extended boolean (0 or -1).
diff --git a/llvm/test/Transforms/InstCombine/add-sitofp.ll b/llvm/test/Transforms/InstCombine/add-sitofp.ll
index db44b806593b64..bb97e9002c29b8 100644
--- a/llvm/test/Transforms/InstCombine/add-sitofp.ll
+++ b/llvm/test/Transforms/InstCombine/add-sitofp.ll
@@ -5,8 +5,8 @@ define double @x(i32 %a, i32 %b) {
 ; CHECK-LABEL: @x(
 ; CHECK-NEXT:    [[M:%.*]] = lshr i32 [[A:%.*]], 24
 ; CHECK-NEXT:    [[N:%.*]] = and i32 [[M]], [[B:%.*]]
-; CHECK-NEXT:    [[ADDCONV:%.*]] = add nuw nsw i32 [[N]], 1
-; CHECK-NEXT:    [[P:%.*]] = sitofp i32 [[ADDCONV]] to double
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i32 [[N]], 1
+; CHECK-NEXT:    [[P:%.*]] = sitofp i32 [[TMP1]] to double
 ; CHECK-NEXT:    ret double [[P]]
 ;
   %m = lshr i32 %a, 24
@@ -19,8 +19,8 @@ define double @x(i32 %a, i32 %b) {
 define double @test(i32 %a) {
 ; CHECK-LABEL: @test(
 ; CHECK-NEXT:    [[A_AND:%.*]] = and i32 [[A:%.*]], 1073741823
-; CHECK-NEXT:    [[ADDCONV:%.*]] = add nuw nsw i32 [[A_AND]], 1
-; CHECK-NEXT:    [[RES:%.*]] = sitofp i32 [[ADDCONV]] to double
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i32 [[A_AND]], 1
+; CHECK-NEXT:    [[RES:%.*]] = sitofp i32 [[TMP1]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   ; Drop two highest bits to guarantee that %a + 1 doesn't overflow
@@ -48,8 +48,8 @@ define double @test_2(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test_2(
 ; CHECK-NEXT:    [[A_AND:%.*]] = and i32 [[A:%.*]], 1073741823
 ; CHECK-NEXT:    [[B_AND:%.*]] = and i32 [[B:%.*]], 1073741823
-; CHECK-NEXT:    [[ADDCONV:%.*]] = add nuw nsw i32 [[A_AND]], [[B_AND]]
-; CHECK-NEXT:    [[RES:%.*]] = sitofp i32 [[ADDCONV]] to double
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i32 [[A_AND]], [[B_AND]]
+; CHECK-NEXT:    [[RES:%.*]] = sitofp i32 [[TMP1]] to double
 ; CHECK-NEXT:    ret double [[RES]]
 ;
   ; Drop two highest bits to guarantee that %a + %b doesn't overflow
@@ -90,8 +90,8 @@ define float @test_3(i32 %a, i32 %b) {
 ; CHECK-LABEL: @test_3(
 ; CHECK-NEXT:    [[M:%.*]] = lshr i32 [[A:%.*]], 24
 ; CHECK-NEXT:    [[N:%.*]] = and i32 [[M]], [[B:%.*]]
-; CHECK-NEXT:    [[O:%.*]] = sitofp i32 [[N]] to float
-; CHECK-NEXT:    [[P:%.*]] = fadd float [[O]], 1.000000e+00
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i32 [[N]], 1
+; CHECK-NEXT:    [[P:%.*]] = sitofp i32 [[TMP1]] to float
 ; CHECK-NEXT:    ret float [[P]]
 ;
   %m = lshr i32 %a, 24
@@ -105,8 +105,8 @@ define <4 x double> @test_4(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: @test_4(
 ; CHECK-NEXT:    [[A_AND:%.*]] = and <4 x i32> [[A:%.*]], <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
 ; CHECK-NEXT:    [[B_AND:%.*]] = and <4 x i32> [[B:%.*]], <i32 1073741823, i32 1073741823, i32 1073741823, i32 1073741823>
-; CHECK-NEXT:    [[ADDCONV:%.*]] = add nuw nsw <4 x i32> [[A_AND]], [[B_AND]]
-; CHECK-NEXT:    [[RES:%.*]] = sitofp <4 x i32> [[ADDCONV]] to <4 x double>
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw <4 x i32> [[A_AND]], [[B_AND]]
+; CHECK-NEXT:    [[RES:%.*]] = sitofp <4 x i32> [[TMP1]] to <4 x double>
 ; CHECK-NEXT:    ret <4 x double> [[RES]]
 ;
   ; Drop two highest bits to guarantee that %a + %b doesn't overflow
diff --git a/llvm/test/Transforms/InstCombine/binop-itofp.ll b/llvm/test/Transforms/InstCombine/binop-itofp.ll
new file mode 100644
index 00000000000000..ffa89374579145
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/binop-itofp.ll
@@ -0,0 +1,998 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+define half @test_ui_ui_i8_add(i8 noundef %x_in, i8 noundef %y_in) {
+; CHECK-LABEL: @test_ui_ui_i8_add(
+; CHECK-NEXT:    [[X:%.*]] = and i8 [[X_IN:%.*]], 127
+; CHECK-NEXT:    [[Y:%.*]] = and i8 [[Y_IN:%.*]], 127
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw i8 [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = uitofp i8 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = and i8 %x_in, 127
+  %y = and i8 %y_in, 127
+  %xf = uitofp i8 %x to half
+  %yf = uitofp i8 %y to half
+  %r = fadd half %xf, %yf
+  ret half %r
+}
+
+define half @test_ui_ui_i8_add_fail_overflow(i8 noundef %x_in, i8 noundef %y_in) {
+; CHECK-LABEL: @test_ui_ui_i8_add_fail_overflow(
+; CHECK-NEXT:    [[X:%.*]] = and i8 [[X_IN:%.*]], 127
+; CHECK-NEXT:    [[Y:%.*]] = and i8 [[Y_IN:%.*]], -127
+; CHECK-NEXT:    [[XF:%.*]] = uitofp i8 [[X]] to half
+; CHECK-NEXT:    [[YF:%.*]] = uitofp i8 [[Y]] to half
+; CHECK-NEXT:    [[R:%.*]] = fadd half [[XF]], [[YF]]
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = and i8 %x_in, 127
+  %y = and i8 %y_in, 129
+  %xf = uitofp i8 %x to half
+  %yf = uitofp i8 %y to half
+  %r = fadd half %xf, %yf
+  ret half %r
+}
+
+define half @test_ui_ui_i8_add_C(i8 noundef %x_in) {
+; CHECK-LABEL: @test_ui_ui_i8_add_C(
+; CHECK-NEXT:    [[TMP1:%.*]] = or i8 [[X_IN:%.*]], -128
+; CHECK-NEXT:    [[R:%.*]] = uitofp i8 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = and i8 %x_in, 127
+  %xf = uitofp i8 %x to half
+  %r = fadd half %xf, 128.0
+  ret half %r
+}
+
+define half @test_ui_ui_i8_add_C_fail_no_repr(i8 noundef %x_in) {
+; CHECK-LABEL: @test_ui_ui_i8_add_C_fail_no_repr(
+; CHECK-NEXT:    [[X:%.*]] = and i8 [[X_IN:%.*]], 127
+; CHECK-NEXT:    [[XF:%.*]] = uitofp i8 [[X]] to half
+; CHECK-NEXT:    [[R:%.*]] = fadd half [[XF]], 0xH57F8
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = and i8 %x_in, 127
+  %xf = uitofp i8 %x to half
+  %r = fadd half %xf, 127.5
+  ret half %r
+}
+
+define half @test_ui_ui_i8_add_C_fail_overflow(i8 noundef %x_in) {
+; CHECK-LABEL: @test_ui_ui_i8_add_C_fail_overflow(
+; CHECK-NEXT:    [[X:%.*]] = and i8 [[X_IN:%.*]], 127
+; CHECK-NEXT:    [[XF:%.*]] = uitofp i8 [[X]] to half
+; CHECK-NEXT:    [[R:%.*]] = fadd half [[XF]], 0xH5808
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = and i8 %x_in, 127
+  %xf = uitofp i8 %x to half
+  %r = fadd half %xf, 129.0
+  ret half %r
+}
+
+define half @test_si_si_i8_add(i8 noundef %x_in, i8 noundef %y_in) {
+; CHECK-LABEL: @test_si_si_i8_add(
+; CHECK-NEXT:    [[X:%.*]] = or i8 [[X_IN:%.*]], -64
+; CHECK-NEXT:    [[Y:%.*]] = or i8 [[Y_IN:%.*]], -64
+; CHECK-NEXT:    [[TMP1:%.*]] = add nsw i8 [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = sitofp i8 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = or i8 %x_in, -64
+  %y = or i8 %y_in, -64
+  %xf = sitofp i8 %x to half
+  %yf = sitofp i8 %y to half
+  %r = fadd half %xf, %yf
+  ret half %r
+}
+
+define half @test_si_si_i8_add_fail_overflow(i8 noundef %x_in, i8 noundef %y_in) {
+; CHECK-LABEL: @test_si_si_i8_add_fail_overflow(
+; CHECK-NEXT:    [[X:%.*]] = or i8 [[X_IN:%.*]], -64
+; CHECK-NEXT:    [[Y:%.*]] = or i8 [[Y_IN:%.*]], -65
+; CHECK-NEXT:    [[XF:%.*]] = sitofp i8 [[X]] to half
+; CHECK-NEXT:    [[YF:%.*]] = sitofp i8 [[Y]] to half
+; CHECK-NEXT:    [[R:%.*]] = fadd half [[XF]], [[YF]]
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = or i8 %x_in, -64
+  %y = or i8 %y_in, -65
+  %xf = sitofp i8 %x to half
+  %yf = sitofp i8 %y to half
+  %r = fadd half %xf, %yf
+  ret half %r
+}
+
+define half @test_ui_si_i8_add(i8 noundef %x_in, i8 noundef %y_in) {
+; CHECK-LABEL: @test_ui_si_i8_add(
+; CHECK-NEXT:    [[X:%.*]] = and i8 [[X_IN:%.*]], 63
+; CHECK-NEXT:    [[Y:%.*]] = and i8 [[Y_IN:%.*]], 63
+; CHECK-NEXT:    [[TMP1:%.*]] = add nuw nsw i8 [[X]], [[Y]]
+; CHECK-NEXT:    [[R:%.*]] = sitofp i8 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = and i8 %x_in, 63
+  %y = and i8 %y_in, 63
+  %xf = sitofp i8 %x to half
+  %yf = uitofp i8 %y to half
+  %r = fadd half %xf, %yf
+  ret half %r
+}
+
+define half @test_ui_si_i8_add_overflow(i8 noundef %x_in, i8 noundef %y_in) {
+; CHECK-LABEL: @test_ui_si_i8_add_overflow(
+; CHECK-NEXT:    [[X:%.*]] = and i8 [[X_IN:%.*]], 63
+; CHECK-NEXT:    [[Y:%.*]] = and i8 [[Y_IN:%.*]], 65
+; CHECK-NEXT:    [[XF:%.*]] = sitofp i8 [[X]] to half
+; CHECK-NEXT:    [[YF:%.*]] = uitofp i8 [[Y]] to half
+; CHECK-NEXT:    [[R:%.*]] = fadd half [[XF]], [[YF]]
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = and i8 %x_in, 63
+  %y = and i8 %y_in, 65
+  %xf = sitofp i8 %x to half
+  %yf = uitofp i8 %y to half
+  %r = fadd half %xf, %yf
+  ret half %r
+}
+
+define half @test_ui_ui_i8_sub_C(i8 noundef %x_in) {
+; CHECK-LABEL: @test_ui_ui_i8_sub_C(
+; CHECK-NEXT:    [[TMP1:%.*]] = and i8 [[X_IN:%.*]], 127
+; CHECK-NEXT:    [[R:%.*]] = uitofp i8 [[TMP1]] to half
+; CHECK-NEXT:    ret half [[R]]
+;
+  %x = or i8 %x_in, 128
+  %xf = uitofp i8 %x to half
+  %r = fsub half %xf, 128.0
+  ret half %r
+}
+
+define half @test_ui_ui_i8_sub_C_fail_overflow(i8 noundef %x_in)...
[truncated]

``````````
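
For the constant-operand form of the fold (case 2 in the `foldFBinOpOfIntCasts` comment), here is a sketch adapted from the `test_ui_ui_i8_add_C` case above (the `@src_const`/`@tgt_const` names are illustrative). `128.0` converts losslessly to `i8` and back, so the `fadd` becomes an integer `add nuw`, which InstCombine then simplifies further to an `or`:

```llvm
; Before: fadd of an exact uitofp with the representable constant 128.0.
define half @src_const(i8 %x_in) {
  %x = and i8 %x_in, 127
  %xf = uitofp i8 %x to half
  %r = fadd half %xf, 128.0
  ret half %r
}

; After: the add nuw of (x & 127) and 128 folds to a plain or.
define half @tgt_const(i8 %x_in) {
  %o = or i8 %x_in, -128
  %r = uitofp i8 %o to half
  ret half %r
}
```

Note also that for wider types the precision checks alone can rule out overflow: e.g. for an `fadd` of two `sitofp i64 ... to double` operands, each operand that passes the precision check uses at most 53 bits, so `OverflowMaxOutputBits` is at most 2 + 53 = 55, below the 64-bit width, and the `willNotOverflow` query is skipped entirely.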

</details>


https://github.com/llvm/llvm-project/pull/82555


More information about the llvm-commits mailing list