[Mlir-commits] [mlir] [mlir][math] Reland 58ef9bec071383744fb703ff08df9806f25e4095 (PR #85436)

llvmlistbot at llvm.org llvmlistbot at llvm.org
Fri Mar 15 17:52:56 PDT 2024


llvmbot wrote:


<!--LLVM PR SUMMARY COMMENT-->

@llvm/pr-subscribers-mlir-math

Author: None (srcarroll)

<details>
<summary>Changes</summary>

The previous implementation decomposes tanh(x) into
(exp(2x) - 1) / (exp(2x) + 1) for x < 0
(1 - exp(-2x)) / (1 + exp(-2x)) for x >= 0
This avoids overflow in the exponential, but the whole decomposition is computed unconditionally for both cases and the result is then selected based on the sign of the input, which means two expensive exp computations are performed for every input.

The proposed change avoids doing the whole computation twice by exploiting the reflection symmetry tanh(-x) = -tanh(x). We can "normalize" the input to be non-negative by setting y = sign(x) * x, where sign(x) is computed as sign(x) = (float)(x < 0) * (-2) + 1. We then compute z = tanh(y) with the decomposition above for x >= 0, and "denormalize" the result as z * sign(x) to restore the correct sign. It is done this way because the formulation is very amenable to vectorization.
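
For reference, the expansion this produces for a scalar `f32` looks roughly like the following (a sketch mirroring the updated `expand-math.mlir` expectations in the diff below; the SSA value names are illustrative):

```mlir
func.func @tanh(%x: f32) -> f32 {
  %zero = arith.constant 0.000000e+00 : f32
  %one = arith.constant 1.000000e+00 : f32
  %neg_two = arith.constant -2.000000e+00 : f32
  // sign(x) = cast<f32>(x < 0) * (-2) + 1, i.e. -1 for negative inputs, +1 otherwise.
  %is_neg = arith.cmpf olt, %x, %zero : f32
  %is_neg_f = arith.uitofp %is_neg : i1 to f32
  %scaled = arith.mulf %is_neg_f, %neg_two : f32
  %sign = arith.addf %scaled, %one : f32
  // Normalize the input: y = sign(x) * x >= 0.
  %pos_x = arith.mulf %sign, %x : f32
  // tanh(y) = (1 - exp(-2y)) / (1 + exp(-2y)).
  %neg_doubled = arith.mulf %pos_x, %neg_two : f32
  %exp = math.exp %neg_doubled : f32
  %num = arith.subf %one, %exp : f32
  %den = arith.addf %exp, %one : f32
  %pos_res = arith.divf %num, %den : f32
  // Restore the sign: tanh(x) = sign(x) * tanh(y).
  %res = arith.mulf %sign, %pos_res : f32
  return %res : f32
}
```

The single `math.exp` and final `arith.mulf` replace the second `math.exp`/`arith.divf` chain and the `arith.select` of the previous expansion.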

This method trades the duplicated decomposition computation (which takes 5 instructions, including an extra expensive exp and div) for 4 cheap instructions to compute the sign value:

1. arith.cmpf (already present in the previous implementation)
2. arith.uitofp
3. arith.mulf
4. arith.addf

plus 1 more instruction to apply the correct sign to the result:

5. arith.mulf

Moreover, this implementation yields numerically identical results to the previous one.
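
For completeness, the identities behind the equivalence (standard math, not specific to this patch):

$$\tanh(-x) = -\tanh(x), \qquad \tanh(y) = \frac{1 - e^{-2y}}{1 + e^{-2y}},$$

where restricting to $y \ge 0$ keeps $e^{-2y}$ in $(0, 1]$. With $s = \operatorname{sign}(x)$ and $y = s\,x \ge 0$,

$$\tanh(x) = s \cdot \tanh(y) = s \cdot \frac{1 - e^{-2sx}}{1 + e^{-2sx}}.$$

Since multiplication by $\pm 1$ and $\pm 2$ is exact in IEEE arithmetic, the new path feeds `exp` the same argument as the corresponding branch of the old expansion, which is consistent with the claim of identical numerical results.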

As part of the relanding, a casting issue from the original commit has been fixed: the bool-to-float cast now uses `uitofp`. Additionally, a correctness test run with `mlir-cpu-runner` has been added.

---
Full diff: https://github.com/llvm/llvm-project/pull/85436.diff


3 Files Affected:

- (modified) mlir/lib/Dialect/Math/Transforms/ExpandPatterns.cpp (+23-17) 
- (modified) mlir/test/Dialect/Math/expand-math.mlir (+9-10) 
- (modified) mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir (+19) 


``````````diff
diff --git a/mlir/lib/Dialect/Math/Transforms/ExpandPatterns.cpp b/mlir/lib/Dialect/Math/Transforms/ExpandPatterns.cpp
index 989a3e5536ec66..fceafcff8490c3 100644
--- a/mlir/lib/Dialect/Math/Transforms/ExpandPatterns.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/ExpandPatterns.cpp
@@ -91,34 +91,40 @@ static LogicalResult convertCoshOp(math::CoshOp op, PatternRewriter &rewriter) {
 }
 
 /// Expands tanh op into
-///   1) 1-exp^{-2x} / 1+exp^{-2x}, if x => 0
-///   2) exp^{2x}-1 / exp^{2x}+1  , if x < 0
+/// 1-exp^{-2x} / 1+exp^{-2x}
+/// To avoid overflow we exploit the reflection symmetry `tanh(-x) = -tanh(x)`.
+/// We compute a "signs" value which is -1 if input is negative and +1 if input
+/// is positive.  Then multiply the input by this value, guaranteeing that the
+/// result is positive, which also guarantees `exp^{-2x * sign(x)}` is in (0,
+/// 1]. Expand the computation on the input `x * sign(x)`, then multiply the
+/// result by `sign(x)` to retain sign of the real result.
 static LogicalResult convertTanhOp(math::TanhOp op, PatternRewriter &rewriter) {
   auto floatType = op.getOperand().getType();
   Location loc = op.getLoc();
+  Value zero = createFloatConst(loc, floatType, 0.0, rewriter);
   Value one = createFloatConst(loc, floatType, 1.0, rewriter);
-  Value two = createFloatConst(loc, floatType, 2.0, rewriter);
-  Value doubledX = rewriter.create<arith::MulFOp>(loc, op.getOperand(), two);
+  Value negTwo = createFloatConst(loc, floatType, -2.0, rewriter);
+
+  // Compute sign(x) = cast<float_type>(x < 0) * (-2) + 1
+  Value sign = rewriter.create<arith::CmpFOp>(loc, arith::CmpFPredicate::OLT,
+                                              op.getOperand(), zero);
+  sign = rewriter.create<arith::UIToFPOp>(loc, floatType, sign);
+  sign = rewriter.create<arith::MulFOp>(loc, sign, negTwo);
+  sign = rewriter.create<arith::AddFOp>(loc, sign, one);
 
-  // Case 1: tanh(x) = 1-exp^{-2x} / 1+exp^{-2x}
-  Value negDoubledX = rewriter.create<arith::NegFOp>(loc, doubledX);
+  // Normalize input to positive value: y = sign(x) * x
+  Value positiveX = rewriter.create<arith::MulFOp>(loc, sign, op.getOperand());
+
+  // Decompose on normalized input
+  Value negDoubledX = rewriter.create<arith::MulFOp>(loc, negTwo, positiveX);
   Value exp2x = rewriter.create<math::ExpOp>(loc, negDoubledX);
   Value dividend = rewriter.create<arith::SubFOp>(loc, one, exp2x);
   Value divisor = rewriter.create<arith::AddFOp>(loc, one, exp2x);
   Value positiveRes = rewriter.create<arith::DivFOp>(loc, dividend, divisor);
 
-  // Case 2: tanh(x) = exp^{2x}-1 / exp^{2x}+1
-  exp2x = rewriter.create<math::ExpOp>(loc, doubledX);
-  dividend = rewriter.create<arith::SubFOp>(loc, exp2x, one);
-  divisor = rewriter.create<arith::AddFOp>(loc, exp2x, one);
-  Value negativeRes = rewriter.create<arith::DivFOp>(loc, dividend, divisor);
+  // Multiply result by sign(x) to retain signs from negative inputs
+  rewriter.replaceOpWithNewOp<arith::MulFOp>(op, sign, positiveRes);
 
-  // tanh(x) = x >= 0 ? positiveRes : negativeRes
-  Value zero = createFloatConst(loc, floatType, 0.0, rewriter);
-  Value cmpRes = rewriter.create<arith::CmpFOp>(loc, arith::CmpFPredicate::OGE,
-                                                op.getOperand(), zero);
-  rewriter.replaceOpWithNewOp<arith::SelectOp>(op, cmpRes, positiveRes,
-                                               negativeRes);
   return success();
 }
 
diff --git a/mlir/test/Dialect/Math/expand-math.mlir b/mlir/test/Dialect/Math/expand-math.mlir
index 6ee65b085dad1b..6326d3a71874b4 100644
--- a/mlir/test/Dialect/Math/expand-math.mlir
+++ b/mlir/test/Dialect/Math/expand-math.mlir
@@ -7,19 +7,18 @@ func.func @tanh(%arg: f32) -> f32 {
 }
 // CHECK-DAG: %[[ZERO:.+]] = arith.constant 0.000000e+00 : f32
 // CHECK-DAG: %[[ONE:.+]] = arith.constant 1.000000e+00 : f32
-// CHECK-DAG: %[[TWO:.+]] = arith.constant 2.000000e+00 : f32
-// CHECK: %[[DOUBLEDX:.+]] = arith.mulf %arg0, %[[TWO]] : f32
-// CHECK: %[[NEGDOUBLEDX:.+]] = arith.negf %[[DOUBLEDX]] : f32
+// CHECK-DAG: %[[TWO:.+]] = arith.constant -2.000000e+00 : f32
+// CHECK: %[[VAL0:.+]] = arith.cmpf olt, %arg0, %[[ZERO]] : f32
+// CHECK: %[[VAL1:.+]] = arith.uitofp %[[VAL0]] : i1 to f32
+// CHECK: %[[VAL2:.+]] = arith.mulf %[[VAL1]], %[[TWO]] : f32
+// CHECK: %[[SIGN:.+]] = arith.addf %[[VAL2]], %[[ONE]] : f32
+// CHECK: %[[POSX:.+]] = arith.mulf %[[SIGN]], %arg0 : f32
+// CHECK: %[[NEGDOUBLEDX:.+]] = arith.mulf %[[POSX]], %[[TWO]] : f32
 // CHECK: %[[EXP1:.+]] = math.exp %[[NEGDOUBLEDX]] : f32
 // CHECK: %[[DIVIDEND1:.+]] = arith.subf %[[ONE]], %[[EXP1]] : f32
 // CHECK: %[[DIVISOR1:.+]] = arith.addf %[[EXP1]], %[[ONE]] : f32
-// CHECK: %[[RES1:.+]] = arith.divf %[[DIVIDEND1]], %[[DIVISOR1]] : f32
-// CHECK: %[[EXP2:.+]] = math.exp %[[DOUBLEDX]] : f32
-// CHECK: %[[DIVIDEND2:.+]] = arith.subf %[[EXP2]], %[[ONE]] : f32
-// CHECK: %[[DIVISOR2:.+]] = arith.addf %[[EXP2]], %[[ONE]] : f32
-// CHECK: %[[RES2:.+]] = arith.divf %[[DIVIDEND2]], %[[DIVISOR2]] : f32
-// CHECK: %[[COND:.+]] = arith.cmpf oge, %arg0, %[[ZERO]] : f32
-// CHECK: %[[RESULT:.+]] = arith.select %[[COND]], %[[RES1]], %[[RES2]] : f32
+// CHECK: %[[POSRES:.+]] = arith.divf %[[DIVIDEND1]], %[[DIVISOR1]] : f32
+// CHECK: %[[RESULT:.+]] = arith.mulf %[[SIGN]], %[[POSRES]] : f32
 // CHECK: return %[[RESULT]]
 
 // -----
diff --git a/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir b/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir
index 541a201c94c586..e2229a392bbf76 100644
--- a/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir
+++ b/mlir/test/mlir-cpu-runner/test-expand-math-approx.mlir
@@ -683,6 +683,24 @@ func.func @cosh() {
  return
 }
 
+// -------------------------------------------------------------------------- //
+// Tanh.
+// -------------------------------------------------------------------------- //
+
+func.func @tanh_8xf32(%a : vector<8xf32>) {
+  %r = math.tanh %a : vector<8xf32>
+  vector.print %r : vector<8xf32>
+  return
+}
+
+func.func @tanh() {
+  // CHECK: -1, -0.761594, -0.291313, 0, 0.291313, 0.761594, 1, 1
+  %v3 = arith.constant dense<[0xff800000, -1.0, -0.3, 0.0, 0.3, 1.0, 10.0, 0x7f800000]> : vector<8xf32>
+  call @tanh_8xf32(%v3) : (vector<8xf32>) -> ()
+
+ return
+}
+
 func.func @main() {
   call @exp2f() : () -> ()
   call @roundf() : () -> ()
@@ -690,5 +708,6 @@ func.func @main() {
   call @roundeven() : () -> ()
   call @sinh() : () -> ()
   call @cosh() : () -> ()
+  call @tanh() : () -> ()
   return
 }

``````````

</details>


https://github.com/llvm/llvm-project/pull/85436


More information about the Mlir-commits mailing list