[flang-commits] [flang] 00f7096 - [mlir][math] Rename math.abs -> math.absf
Jeff Niu via flang-commits
flang-commits at lists.llvm.org
Mon Aug 8 08:05:10 PDT 2022
Author: Jeff Niu
Date: 2022-08-08T11:04:58-04:00
New Revision: 00f7096d31cc7896ffd490e65104d264923f0df5
URL: https://github.com/llvm/llvm-project/commit/00f7096d31cc7896ffd490e65104d264923f0df5
DIFF: https://github.com/llvm/llvm-project/commit/00f7096d31cc7896ffd490e65104d264923f0df5.diff
LOG: [mlir][math] Rename math.abs -> math.absf
To make room for introducing `math.absi`.
Reviewed By: ftynse
Differential Revision: https://reviews.llvm.org/D131325
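For readers updating downstream IR by hand, a minimal sketch (not taken from this commit; the function name is illustrative) of the spelling change — only the mnemonic changes, operand and result types behave as before:

```mlir
// A minimal sketch, not part of the commit: only the op name changes.
func.func @absf_example(%b: f64, %g: vector<4xf32>) -> (f64, vector<4xf32>) {
  // Previously written as: %a = math.abs %b : f64
  %a = math.absf %b : f64
  // Elementwise vector form, unchanged apart from the spelling.
  %f = math.absf %g : vector<4xf32>
  return %a, %f : f64, vector<4xf32>
}
```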
Added:
Modified:
flang/lib/Lower/IntrinsicCall.cpp
flang/test/Intrinsics/math-codegen.fir
flang/test/Lower/Intrinsics/abs.f90
flang/test/Lower/array-expression.f90
flang/test/Lower/math-lowering.f90
mlir/docs/Bindings/Python.md
mlir/include/mlir/Dialect/Math/IR/MathBase.td
mlir/include/mlir/Dialect/Math/IR/MathOps.td
mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp
mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp
mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
mlir/lib/Dialect/Math/IR/MathOps.cpp
mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
mlir/test/Dialect/Math/canonicalize.mlir
mlir/test/Dialect/Math/polynomial-approximation.mlir
mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
mlir/test/IR/core-ops.mlir
mlir/test/Target/Cpp/invalid.mlir
mlir/test/python/dialects/linalg/opdsl/emit_misc.py
Removed:
################################################################################
diff --git a/flang/lib/Lower/IntrinsicCall.cpp b/flang/lib/Lower/IntrinsicCall.cpp
index 2294780910bdd..af1b16fdc8d9b 100644
--- a/flang/lib/Lower/IntrinsicCall.cpp
+++ b/flang/lib/Lower/IntrinsicCall.cpp
@@ -1207,10 +1207,10 @@ static mlir::Value genMathOp(fir::FirOpBuilder &builder, mlir::Location loc,
/// See https://gcc.gnu.org/onlinedocs/gcc-12.1.0/gfortran/\
/// Intrinsic-Procedures.html for a reference.
static constexpr MathOperation mathOperations[] = {
- {"abs", "fabsf", genF32F32FuncType, genMathOp<mlir::math::AbsOp>},
- {"abs", "fabs", genF64F64FuncType, genMathOp<mlir::math::AbsOp>},
+ {"abs", "fabsf", genF32F32FuncType, genMathOp<mlir::math::AbsFOp>},
+ {"abs", "fabs", genF64F64FuncType, genMathOp<mlir::math::AbsFOp>},
{"abs", "llvm.fabs.f128", genF128F128FuncType,
- genMathOp<mlir::math::AbsOp>},
+ genMathOp<mlir::math::AbsFOp>},
// llvm.trunc behaves the same way as libm's trunc.
{"aint", "llvm.trunc.f32", genF32F32FuncType, genLibCall},
{"aint", "llvm.trunc.f64", genF64F64FuncType, genLibCall},
diff --git a/flang/test/Intrinsics/math-codegen.fir b/flang/test/Intrinsics/math-codegen.fir
index f1b51c4a37d05..c8f99b599b599 100644
--- a/flang/test/Intrinsics/math-codegen.fir
+++ b/flang/test/Intrinsics/math-codegen.fir
@@ -21,7 +21,7 @@
func.func @_QPtest_real4(%arg0: !fir.ref<f32> {fir.bindc_name = "x"}) -> f32 {
%0 = fir.alloca f32 {bindc_name = "test_real4", uniq_name = "_QFtest_real4Etest_real4"}
%1 = fir.load %arg0 : !fir.ref<f32>
- %2 = math.abs %1 : f32
+ %2 = math.absf %1 : f32
fir.store %2 to %0 : !fir.ref<f32>
%3 = fir.load %0 : !fir.ref<f32>
return %3 : f32
@@ -29,7 +29,7 @@ func.func @_QPtest_real4(%arg0: !fir.ref<f32> {fir.bindc_name = "x"}) -> f32 {
func.func @_QPtest_real8(%arg0: !fir.ref<f64> {fir.bindc_name = "x"}) -> f64 {
%0 = fir.alloca f64 {bindc_name = "test_real8", uniq_name = "_QFtest_real8Etest_real8"}
%1 = fir.load %arg0 : !fir.ref<f64>
- %2 = math.abs %1 : f64
+ %2 = math.absf %1 : f64
fir.store %2 to %0 : !fir.ref<f64>
%3 = fir.load %0 : !fir.ref<f64>
return %3 : f64
@@ -37,7 +37,7 @@ func.func @_QPtest_real8(%arg0: !fir.ref<f64> {fir.bindc_name = "x"}) -> f64 {
func.func @_QPtest_real16(%arg0: !fir.ref<f128> {fir.bindc_name = "x"}) -> f128 {
%0 = fir.alloca f128 {bindc_name = "test_real16", uniq_name = "_QFtest_real16Etest_real16"}
%1 = fir.load %arg0 : !fir.ref<f128>
- %2 = math.abs %1 : f128
+ %2 = math.absf %1 : f128
fir.store %2 to %0 : !fir.ref<f128>
%3 = fir.load %0 : !fir.ref<f128>
return %3 : f128
@@ -93,7 +93,7 @@ func.func private @hypot(f64, f64) -> f64
func.func @_QPtest_real4(%arg0: !fir.ref<f32> {fir.bindc_name = "x"}) -> f32 {
%0 = fir.alloca f32 {bindc_name = "test_real4", uniq_name = "_QFtest_real4Etest_real4"}
%1 = fir.load %arg0 : !fir.ref<f32>
- %2 = math.abs %1 : f32
+ %2 = math.absf %1 : f32
fir.store %2 to %0 : !fir.ref<f32>
%3 = fir.load %0 : !fir.ref<f32>
return %3 : f32
@@ -101,7 +101,7 @@ func.func @_QPtest_real4(%arg0: !fir.ref<f32> {fir.bindc_name = "x"}) -> f32 {
func.func @_QPtest_real8(%arg0: !fir.ref<f64> {fir.bindc_name = "x"}) -> f64 {
%0 = fir.alloca f64 {bindc_name = "test_real8", uniq_name = "_QFtest_real8Etest_real8"}
%1 = fir.load %arg0 : !fir.ref<f64>
- %2 = math.abs %1 : f64
+ %2 = math.absf %1 : f64
fir.store %2 to %0 : !fir.ref<f64>
%3 = fir.load %0 : !fir.ref<f64>
return %3 : f64
@@ -109,7 +109,7 @@ func.func @_QPtest_real8(%arg0: !fir.ref<f64> {fir.bindc_name = "x"}) -> f64 {
func.func @_QPtest_real16(%arg0: !fir.ref<f128> {fir.bindc_name = "x"}) -> f128 {
%0 = fir.alloca f128 {bindc_name = "test_real16", uniq_name = "_QFtest_real16Etest_real16"}
%1 = fir.load %arg0 : !fir.ref<f128>
- %2 = math.abs %1 : f128
+ %2 = math.absf %1 : f128
fir.store %2 to %0 : !fir.ref<f128>
%3 = fir.load %0 : !fir.ref<f128>
return %3 : f128
@@ -1934,4 +1934,3 @@ func.func @_QPtest_real8(%arg0: !fir.ref<f64> {fir.bindc_name = "x"}) -> f64 {
}
func.func private @tanf(f32) -> f32
func.func private @tan(f64) -> f64
-
diff --git a/flang/test/Lower/Intrinsics/abs.f90 b/flang/test/Lower/Intrinsics/abs.f90
index ee8be65f89d0c..80960daecfb43 100644
--- a/flang/test/Lower/Intrinsics/abs.f90
+++ b/flang/test/Lower/Intrinsics/abs.f90
@@ -36,7 +36,7 @@ subroutine abs_testi16(a, b)
subroutine abs_testh(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f16>
! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (f16) -> f32
-! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2_1]] : f32
+! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2_1]] : f32
! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> f16
! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref<f16>
! CHECK: return
@@ -49,7 +49,7 @@ subroutine abs_testh(a, b)
subroutine abs_testb(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<bf16>
! CHECK: %[[VAL_2_1:.*]] = fir.convert %[[VAL_2]] : (bf16) -> f32
-! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2_1]] : f32
+! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2_1]] : f32
! CHECK: %[[VAL_3_1:.*]] = fir.convert %[[VAL_3]] : (f32) -> bf16
! CHECK: fir.store %[[VAL_3_1]] to %[[VAL_1]] : !fir.ref<bf16>
! CHECK: return
@@ -61,7 +61,7 @@ subroutine abs_testb(a, b)
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f32>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f32>{{.*}}) {
subroutine abs_testr(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f32>
-! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f32
+! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2]] : f32
! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f32>
! CHECK: return
real :: a, b
@@ -72,7 +72,7 @@ subroutine abs_testr(a, b)
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f64>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f64>{{.*}}) {
subroutine abs_testd(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f64>
-! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f64
+! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2]] : f64
! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f64>
! CHECK: return
real(kind=8) :: a, b
@@ -83,7 +83,7 @@ subroutine abs_testd(a, b)
! CHECK-SAME: %[[VAL_0:.*]]: !fir.ref<f128>{{.*}}, %[[VAL_1:.*]]: !fir.ref<f128>{{.*}}) {
subroutine abs_testr16(a, b)
! CHECK: %[[VAL_2:.*]] = fir.load %[[VAL_0]] : !fir.ref<f128>
-! CHECK: %[[VAL_3:.*]] = math.abs %[[VAL_2]] : f128
+! CHECK: %[[VAL_3:.*]] = math.absf %[[VAL_2]] : f128
! CHECK: fir.store %[[VAL_3]] to %[[VAL_1]] : !fir.ref<f128>
! CHECK: return
real(kind=16) :: a, b
diff --git a/flang/test/Lower/array-expression.f90 b/flang/test/Lower/array-expression.f90
index 953fd3ad72fe0..6375d2d55b10a 100644
--- a/flang/test/Lower/array-expression.f90
+++ b/flang/test/Lower/array-expression.f90
@@ -116,15 +116,15 @@ end subroutine test5
! CHECK: %[[VAL_5:.*]] = fir.load %[[VAL_3]] : !fir.ref<i32>
! CHECK: %[[VAL_6:.*]] = fir.convert %[[VAL_5]] : (i32) -> i64
! CHECK: %[[VAL_7A:.*]] = fir.convert %[[VAL_6]] : (i64) -> index
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_7A]], %[[C0]] : index
-! CHECK: %[[VAL_7:.*]] = arith.select %[[CMP]], %[[VAL_7A]], %[[C0]] : index
+! CHECK: %[[C0:.*]] = arith.constant 0 : index
+! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_7A]], %[[C0]] : index
+! CHECK: %[[VAL_7:.*]] = arith.select %[[CMP]], %[[VAL_7A]], %[[C0]] : index
! CHECK: %[[VAL_8:.*]] = fir.load %[[VAL_4]] : !fir.ref<i32>
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i32) -> i64
! CHECK: %[[VAL_10A:.*]] = fir.convert %[[VAL_9]] : (i64) -> index
-! CHECK: %[[C0_2:.*]] = arith.constant 0 : index
-! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_10A]], %[[C0_2]] : index
-! CHECK: %[[VAL_10:.*]] = arith.select %[[CMP_2]], %[[VAL_10A]], %[[C0_2]] : index
+! CHECK: %[[C0_2:.*]] = arith.constant 0 : index
+! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_10A]], %[[C0_2]] : index
+! CHECK: %[[VAL_10:.*]] = arith.select %[[CMP_2]], %[[VAL_10A]], %[[C0_2]] : index
! CHECK: %[[VAL_11:.*]] = arith.constant 3 : i64
! CHECK: %[[VAL_12:.*]] = fir.convert %[[VAL_11]] : (i64) -> index
! CHECK: %[[VAL_13:.*]] = arith.constant 4 : i64
@@ -255,15 +255,15 @@ end subroutine test6b
! CHECK: %[[VAL_3:.*]] = fir.load %[[VAL_2]] : !fir.ref<i32>
! CHECK: %[[VAL_4:.*]] = fir.convert %[[VAL_3]] : (i32) -> i64
! CHECK: %[[VAL_5A:.*]] = fir.convert %[[VAL_4]] : (i64) -> index
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_5A]], %[[C0]] : index
-! CHECK: %[[VAL_5:.*]] = arith.select %[[CMP]], %[[VAL_5A]], %[[C0]] : index
+! CHECK: %[[C0:.*]] = arith.constant 0 : index
+! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_5A]], %[[C0]] : index
+! CHECK: %[[VAL_5:.*]] = arith.select %[[CMP]], %[[VAL_5A]], %[[C0]] : index
! CHECK: %[[VAL_6:.*]] = fir.load %[[VAL_2]] : !fir.ref<i32>
! CHECK: %[[VAL_7:.*]] = fir.convert %[[VAL_6]] : (i32) -> i64
! CHECK: %[[VAL_8A:.*]] = fir.convert %[[VAL_7]] : (i64) -> index
-! CHECK: %[[C0_2:.*]] = arith.constant 0 : index
-! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_8A]], %[[C0_2]] : index
-! CHECK: %[[VAL_8:.*]] = arith.select %[[CMP_2]], %[[VAL_8A]], %[[C0_2]] : index
+! CHECK: %[[C0_2:.*]] = arith.constant 0 : index
+! CHECK: %[[CMP_2:.*]] = arith.cmpi sgt, %[[VAL_8A]], %[[C0_2]] : index
+! CHECK: %[[VAL_8:.*]] = arith.select %[[CMP_2]], %[[VAL_8A]], %[[C0_2]] : index
! CHECK: %[[VAL_9:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_10:.*]] = fir.array_load %[[VAL_0]](%[[VAL_9]]) : (!fir.ref<!fir.array<?xf32>>, !fir.shape<1>) -> !fir.array<?xf32>
! CHECK: %[[VAL_11:.*]] = fir.shape %[[VAL_5]] : (index) -> !fir.shape<1>
@@ -471,7 +471,7 @@ subroutine test15(a,b)
real :: a(100), b(100)
! CHECK: %[[loop:.*]] = fir.do_loop %[[i:.*]] = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[bth:.*]] = %[[barr]]) -> (!fir.array<100xf32>) {
! CHECK: %[[val:.*]] = fir.array_fetch %[[aarr]], %[[i]] : (!fir.array<100xf32>, index) -> f32
- ! CHECK: %[[fres:.*]] = math.abs %[[val]] : f32
+ ! CHECK: %[[fres:.*]] = math.absf %[[val]] : f32
! CHECK: %[[res:.*]] = fir.array_update %[[bth]], %[[fres]], %[[i]] : (!fir.array<100xf32>, f32, index) -> !fir.array<100xf32>
! CHECK: fir.result %[[res]] : !fir.array<100xf32>
! CHECK: fir.array_merge_store %[[barr]], %[[loop]] to %[[b]]
@@ -1065,9 +1065,9 @@ end subroutine test19g
! CHECK: %[[VAL_13:.*]] = fir.load %[[VAL_3]] : !fir.ref<i32>
! CHECK: %[[VAL_14:.*]] = fir.convert %[[VAL_13]] : (i32) -> i64
! CHECK: %[[VAL_15A:.*]] = fir.convert %[[VAL_14]] : (i64) -> index
-! CHECK: %[[C0:.*]] = arith.constant 0 : index
-! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_15A]], %[[C0]] : index
-! CHECK: %[[VAL_15:.*]] = arith.select %[[CMP]], %[[VAL_15A]], %[[C0]] : index
+! CHECK: %[[C0:.*]] = arith.constant 0 : index
+! CHECK: %[[CMP:.*]] = arith.cmpi sgt, %[[VAL_15A]], %[[C0]] : index
+! CHECK: %[[VAL_15:.*]] = arith.select %[[CMP]], %[[VAL_15A]], %[[C0]] : index
! CHECK: %[[VAL_16:.*]] = fir.shape %[[VAL_10]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_17:.*]] = fir.array_load %[[VAL_9]](%[[VAL_16]]) typeparams %[[VAL_8]] : (!fir.ref<!fir.array<70x!fir.char<1,?>>>, !fir.shape<1>, i32) -> !fir.array<70x!fir.char<1,?>>
! CHECK: %[[VAL_18:.*]] = arith.constant 1 : i64
diff --git a/flang/test/Lower/math-lowering.f90 b/flang/test/Lower/math-lowering.f90
index 7a74068d780fb..4fda7bd041f15 100644
--- a/flang/test/Lower/math-lowering.f90
+++ b/flang/test/Lower/math-lowering.f90
@@ -14,8 +14,8 @@ function test_real4(x)
end function
! ALL-LABEL: @_QPtest_real4
-! FAST: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f32
-! RELAXED: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f32
+! FAST: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f32
+! RELAXED: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f32
! PRECISE: {{%[A-Za-z0-9._]+}} = fir.call @fabsf({{%[A-Za-z0-9._]+}}) : (f32) -> f32
function test_real8(x)
@@ -24,8 +24,8 @@ function test_real8(x)
end function
! ALL-LABEL: @_QPtest_real8
-! FAST: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f64
-! RELAXED: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f64
+! FAST: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f64
+! RELAXED: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f64
! PRECISE: {{%[A-Za-z0-9._]+}} = fir.call @fabs({{%[A-Za-z0-9._]+}}) : (f64) -> f64
function test_real16(x)
@@ -33,8 +33,8 @@ function test_real16(x)
test_real16 = abs(x)
end function
! ALL-LABEL: @_QPtest_real16
-! FAST: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f128
-! RELAXED: {{%[A-Za-z0-9._]+}} = math.abs {{%[A-Za-z0-9._]+}} : f128
+! FAST: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f128
+! RELAXED: {{%[A-Za-z0-9._]+}} = math.absf {{%[A-Za-z0-9._]+}} : f128
! PRECISE: {{%[A-Za-z0-9._]+}} = fir.call @llvm.fabs.f128({{%[A-Za-z0-9._]+}}) : (f128) -> f128
function test_complex4(c)
diff --git a/mlir/docs/Bindings/Python.md b/mlir/docs/Bindings/Python.md
index c00446d281e5c..cdb00dc22146e 100644
--- a/mlir/docs/Bindings/Python.md
+++ b/mlir/docs/Bindings/Python.md
@@ -940,7 +940,7 @@ Each concrete `OpView` subclass further defines several public-intended
attributes:
* `OPERATION_NAME` attribute with the `str` fully qualified operation name
- (i.e. `math.abs`).
+ (i.e. `math.absf`).
* An `__init__` method for the *default builder* if one is defined or inferred
for the operation.
* `@property` getter for each operand or result (using an auto-generated name
@@ -1170,4 +1170,3 @@ exist for this functionality, which can then be wrapped using pybind11 and
utilities to connect to the rest of Python API. The bindings can be located in a
separate pybind11 module or in the same module as attributes and types, and
loaded along with the dialect.
-
diff --git a/mlir/include/mlir/Dialect/Math/IR/MathBase.td b/mlir/include/mlir/Dialect/Math/IR/MathBase.td
index 85c8c2baff652..0e1d19439612c 100644
--- a/mlir/include/mlir/Dialect/Math/IR/MathBase.td
+++ b/mlir/include/mlir/Dialect/Math/IR/MathBase.td
@@ -20,13 +20,13 @@ def Math_Dialect : Dialect {
```mlir
// Scalar absolute value.
- %a = math.abs %b : f64
-
+ %a = math.absf %b : f64
+
// Vector elementwise absolute value.
- %f = math.abs %g : vector<4xf32>
+ %f = math.absf %g : vector<4xf32>
// Tensor elementwise absolute value.
- %x = math.abs %y : tensor<4x?xf8>
+ %x = math.absf %y : tensor<4x?xf8>
```
}];
let hasConstantMaterializer = 1;
diff --git a/mlir/include/mlir/Dialect/Math/IR/MathOps.td b/mlir/include/mlir/Dialect/Math/IR/MathOps.td
index 99538be12958a..27ab3a3b4656d 100644
--- a/mlir/include/mlir/Dialect/Math/IR/MathOps.td
+++ b/mlir/include/mlir/Dialect/Math/IR/MathOps.td
@@ -66,21 +66,21 @@ class Math_FloatTernaryOp<string mnemonic, list<Trait> traits = []> :
}
//===----------------------------------------------------------------------===//
-// AbsOp
+// AbsFOp
//===----------------------------------------------------------------------===//
-def Math_AbsOp : Math_FloatUnaryOp<"abs"> {
+def Math_AbsFOp : Math_FloatUnaryOp<"absf"> {
let summary = "floating point absolute-value operation";
let description = [{
- The `abs` operation computes the absolute value. It takes one operand of
- floating point type (i.e., scalar, tensor or vector) and returns one result of
- the same type.
+ The `absf` operation computes the absolute value. It takes one operand of
+ floating point type (i.e., scalar, tensor or vector) and returns one result
+ of the same type.
Example:
```mlir
// Scalar absolute value.
- %a = math.abs %b : f64
+ %a = math.absf %b : f64
```
}];
let hasFolder = 1;
@@ -446,7 +446,7 @@ def Math_FloorOp : Math_FloatUnaryOp<"floor"> {
```mlir
// Scalar floor value.
- %a = math.floor %b : f64
+ %a = math.floor %b : f64
```
}];
}
@@ -611,7 +611,7 @@ def Math_RsqrtOp : Math_FloatUnaryOp<"rsqrt"> {
one result of the same type. It has no standard attributes.
Example:
-
+
```mlir
// Scalar reciprocal square root value.
%a = math.rsqrt %b : f64
diff --git a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
index 9e9ba52e70aa5..643806f2b0fa0 100644
--- a/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
+++ b/mlir/lib/Conversion/ComplexToStandard/ComplexToStandard.cpp
@@ -288,10 +288,10 @@ struct DivOpConversion : public OpConversionPattern<complex::DivOp> {
// Case 1. Zero denominator, numerator contains at most one NaN value.
Value zero = rewriter.create<arith::ConstantOp>(
loc, elementType, rewriter.getZeroAttr(elementType));
- Value rhsRealAbs = rewriter.create<math::AbsOp>(loc, rhsReal);
+ Value rhsRealAbs = rewriter.create<math::AbsFOp>(loc, rhsReal);
Value rhsRealIsZero = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OEQ, rhsRealAbs, zero);
- Value rhsImagAbs = rewriter.create<math::AbsOp>(loc, rhsImag);
+ Value rhsImagAbs = rewriter.create<math::AbsFOp>(loc, rhsImag);
Value rhsImagIsZero = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OEQ, rhsImagAbs, zero);
Value lhsRealIsNotNaN = rewriter.create<arith::CmpFOp>(
@@ -321,10 +321,10 @@ struct DivOpConversion : public OpConversionPattern<complex::DivOp> {
loc, arith::CmpFPredicate::ONE, rhsImagAbs, inf);
Value rhsFinite =
rewriter.create<arith::AndIOp>(loc, rhsRealFinite, rhsImagFinite);
- Value lhsRealAbs = rewriter.create<math::AbsOp>(loc, lhsReal);
+ Value lhsRealAbs = rewriter.create<math::AbsFOp>(loc, lhsReal);
Value lhsRealInfinite = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OEQ, lhsRealAbs, inf);
- Value lhsImagAbs = rewriter.create<math::AbsOp>(loc, lhsImag);
+ Value lhsImagAbs = rewriter.create<math::AbsFOp>(loc, lhsImag);
Value lhsImagInfinite = rewriter.create<arith::CmpFOp>(
loc, arith::CmpFPredicate::OEQ, lhsImagAbs, inf);
Value lhsInfinite =
@@ -533,25 +533,25 @@ struct MulOpConversion : public OpConversionPattern<complex::MulOp> {
auto elementType = type.getElementType().cast<FloatType>();
Value lhsReal = b.create<complex::ReOp>(elementType, adaptor.getLhs());
- Value lhsRealAbs = b.create<math::AbsOp>(lhsReal);
+ Value lhsRealAbs = b.create<math::AbsFOp>(lhsReal);
Value lhsImag = b.create<complex::ImOp>(elementType, adaptor.getLhs());
- Value lhsImagAbs = b.create<math::AbsOp>(lhsImag);
+ Value lhsImagAbs = b.create<math::AbsFOp>(lhsImag);
Value rhsReal = b.create<complex::ReOp>(elementType, adaptor.getRhs());
- Value rhsRealAbs = b.create<math::AbsOp>(rhsReal);
+ Value rhsRealAbs = b.create<math::AbsFOp>(rhsReal);
Value rhsImag = b.create<complex::ImOp>(elementType, adaptor.getRhs());
- Value rhsImagAbs = b.create<math::AbsOp>(rhsImag);
+ Value rhsImagAbs = b.create<math::AbsFOp>(rhsImag);
Value lhsRealTimesRhsReal = b.create<arith::MulFOp>(lhsReal, rhsReal);
- Value lhsRealTimesRhsRealAbs = b.create<math::AbsOp>(lhsRealTimesRhsReal);
+ Value lhsRealTimesRhsRealAbs = b.create<math::AbsFOp>(lhsRealTimesRhsReal);
Value lhsImagTimesRhsImag = b.create<arith::MulFOp>(lhsImag, rhsImag);
- Value lhsImagTimesRhsImagAbs = b.create<math::AbsOp>(lhsImagTimesRhsImag);
+ Value lhsImagTimesRhsImagAbs = b.create<math::AbsFOp>(lhsImagTimesRhsImag);
Value real =
b.create<arith::SubFOp>(lhsRealTimesRhsReal, lhsImagTimesRhsImag);
Value lhsImagTimesRhsReal = b.create<arith::MulFOp>(lhsImag, rhsReal);
- Value lhsImagTimesRhsRealAbs = b.create<math::AbsOp>(lhsImagTimesRhsReal);
+ Value lhsImagTimesRhsRealAbs = b.create<math::AbsFOp>(lhsImagTimesRhsReal);
Value lhsRealTimesRhsImag = b.create<arith::MulFOp>(lhsReal, rhsImag);
- Value lhsRealTimesRhsImagAbs = b.create<math::AbsOp>(lhsRealTimesRhsImag);
+ Value lhsRealTimesRhsImagAbs = b.create<math::AbsFOp>(lhsRealTimesRhsImag);
Value imag =
b.create<arith::AddFOp>(lhsImagTimesRhsReal, lhsRealTimesRhsImag);
@@ -762,7 +762,7 @@ struct SqrtOpConversion : public OpConversionPattern<complex::SqrtOp> {
Value real = b.create<complex::ReOp>(elementType, adaptor.getComplex());
Value imag = b.create<complex::ImOp>(elementType, adaptor.getComplex());
- Value absLhs = b.create<math::AbsOp>(real);
+ Value absLhs = b.create<math::AbsFOp>(real);
Value absArg = b.create<complex::AbsOp>(elementType, arg);
Value addAbs = b.create<arith::AddFOp>(absLhs, absArg);
diff --git a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
index b7158f22e7e4d..342ae35f1f9c1 100644
--- a/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
+++ b/mlir/lib/Conversion/GPUToNVVM/LowerGpuOpsToNVVMOps.cpp
@@ -254,8 +254,8 @@ void mlir::populateGpuToNVVMConversionPatterns(LLVMTypeConverter &converter,
StringAttr::get(&converter.getContext(),
NVVM::NVVMDialect::getKernelFuncAttrName()));
- patterns.add<OpToFuncCallLowering<math::AbsOp>>(converter, "__nv_fabsf",
- "__nv_fabs");
+ patterns.add<OpToFuncCallLowering<math::AbsFOp>>(converter, "__nv_fabsf",
+ "__nv_fabs");
patterns.add<OpToFuncCallLowering<math::AtanOp>>(converter, "__nv_atanf",
"__nv_atan");
patterns.add<OpToFuncCallLowering<math::Atan2Op>>(converter, "__nv_atan2f",
diff --git a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
index 163c4a363e784..2c7f700465193 100644
--- a/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
+++ b/mlir/lib/Conversion/GPUToROCDL/LowerGpuOpsToROCDLOps.cpp
@@ -181,8 +181,8 @@ void mlir::populateGpuToROCDLConversionPatterns(
patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/4);
}
- patterns.add<OpToFuncCallLowering<math::AbsOp>>(converter, "__ocml_fabs_f32",
- "__ocml_fabs_f64");
+ patterns.add<OpToFuncCallLowering<math::AbsFOp>>(converter, "__ocml_fabs_f32",
+ "__ocml_fabs_f64");
patterns.add<OpToFuncCallLowering<math::AtanOp>>(converter, "__ocml_atan_f32",
"__ocml_atan_f64");
patterns.add<OpToFuncCallLowering<math::Atan2Op>>(
diff --git a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp
index 94f1f629a03bf..1ac4bbd7492e8 100644
--- a/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp
+++ b/mlir/lib/Conversion/MathToLLVM/MathToLLVM.cpp
@@ -18,7 +18,7 @@
using namespace mlir;
namespace {
-using AbsOpLowering = VectorConvertToLLVMPattern<math::AbsOp, LLVM::FAbsOp>;
+using AbsFOpLowering = VectorConvertToLLVMPattern<math::AbsFOp, LLVM::FAbsOp>;
using CeilOpLowering = VectorConvertToLLVMPattern<math::CeilOp, LLVM::FCeilOp>;
using CopySignOpLowering =
VectorConvertToLLVMPattern<math::CopySignOp, LLVM::CopySignOp>;
@@ -268,7 +268,7 @@ void mlir::populateMathToLLVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
// clang-format off
patterns.add<
- AbsOpLowering,
+ AbsFOpLowering,
CeilOpLowering,
CopySignOpLowering,
CosOpLowering,
diff --git a/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp b/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp
index 49d470faee786..e53bd2b954947 100644
--- a/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp
+++ b/mlir/lib/Conversion/MathToSPIRV/MathToSPIRV.cpp
@@ -287,7 +287,7 @@ void populateMathToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
patterns
.add<CountLeadingZerosPattern, Log1pOpPattern<spirv::GLLogOp>,
ExpM1OpPattern<spirv::GLExpOp>, PowFOpPattern, RoundOpPattern,
- spirv::ElementwiseOpPattern<math::AbsOp, spirv::GLFAbsOp>,
+ spirv::ElementwiseOpPattern<math::AbsFOp, spirv::GLFAbsOp>,
spirv::ElementwiseOpPattern<math::CeilOp, spirv::GLCeilOp>,
spirv::ElementwiseOpPattern<math::CosOp, spirv::GLCosOp>,
spirv::ElementwiseOpPattern<math::ExpOp, spirv::GLExpOp>,
@@ -302,7 +302,7 @@ void populateMathToSPIRVPatterns(SPIRVTypeConverter &typeConverter,
// OpenCL patterns
patterns.add<Log1pOpPattern<spirv::CLLogOp>, ExpM1OpPattern<spirv::CLExpOp>,
- spirv::ElementwiseOpPattern<math::AbsOp, spirv::CLFAbsOp>,
+ spirv::ElementwiseOpPattern<math::AbsFOp, spirv::CLFAbsOp>,
spirv::ElementwiseOpPattern<math::CeilOp, spirv::CLCeilOp>,
spirv::ElementwiseOpPattern<math::CosOp, spirv::CLCosOp>,
spirv::ElementwiseOpPattern<math::ErfOp, spirv::CLErfOp>,
diff --git a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
index b1da7ee7c975e..efaf612360852 100644
--- a/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
+++ b/mlir/lib/Conversion/TosaToLinalg/TosaToLinalg.cpp
@@ -50,7 +50,7 @@ createLinalgBodyCalculationForElementwiseOp(Operation *op, ValueRange args,
// tosa::AbsOp
if (isa<tosa::AbsOp>(op) && elementTy.isa<FloatType>())
- return rewriter.create<math::AbsOp>(loc, resultTypes, args);
+ return rewriter.create<math::AbsFOp>(loc, resultTypes, args);
if (isa<tosa::AbsOp>(op) && elementTy.isa<IntegerType>()) {
auto zero = rewriter.create<arith::ConstantOp>(
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
index d22151327faaf..650d7a2a511a2 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgOps.cpp
@@ -309,7 +309,7 @@ class RegionBuilderHelper {
case UnaryFn::log:
return builder.create<math::LogOp>(arg.getLoc(), arg);
case UnaryFn::abs:
- return builder.create<math::AbsOp>(arg.getLoc(), arg);
+ return builder.create<math::AbsFOp>(arg.getLoc(), arg);
case UnaryFn::ceil:
return builder.create<math::CeilOp>(arg.getLoc(), arg);
case UnaryFn::floor:
diff --git a/mlir/lib/Dialect/Math/IR/MathOps.cpp b/mlir/lib/Dialect/Math/IR/MathOps.cpp
index 05c9d6780dcd5..50d65127cd45a 100644
--- a/mlir/lib/Dialect/Math/IR/MathOps.cpp
+++ b/mlir/lib/Dialect/Math/IR/MathOps.cpp
@@ -22,14 +22,12 @@ using namespace mlir::math;
#include "mlir/Dialect/Math/IR/MathOps.cpp.inc"
//===----------------------------------------------------------------------===//
-// AbsOp folder
+// AbsFOp folder
//===----------------------------------------------------------------------===//
-OpFoldResult math::AbsOp::fold(ArrayRef<Attribute> operands) {
- return constFoldUnaryOp<FloatAttr>(operands, [](const APFloat &a) {
- const APFloat &result(a);
- return abs(result);
- });
+OpFoldResult math::AbsFOp::fold(ArrayRef<Attribute> operands) {
+ return constFoldUnaryOp<FloatAttr>(operands,
+ [](const APFloat &a) { return abs(a); });
}
//===----------------------------------------------------------------------===//
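The folder keeps its behavior under the new name. A hedged sketch (not part of the commit; the function name is illustrative) of what it enables when running the existing -canonicalize pipeline, matching the abs_fold test updated later in this diff:

```mlir
// Sketch only: with the AbsFOp folder in place, `mlir-opt -canonicalize`
// folds a constant operand into an arith.constant.
func.func @abs_fold_example() -> f32 {
  %c = arith.constant -4.0 : f32
  // Folds to: %cst = arith.constant 4.000000e+00 : f32
  %r = math.absf %c : f32
  return %r : f32
}
```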
diff --git a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
index dbd611c07829c..dcb715405c31f 100644
--- a/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
+++ b/mlir/lib/Dialect/Math/Transforms/PolynomialApproximation.cpp
@@ -232,7 +232,7 @@ static std::pair<Value, Value> frexp(ImplicitLocOpBuilder &builder, Value arg,
Value normalizedFraction = builder.create<arith::BitcastOp>(f32Vec, tmp1);
// Compute exponent.
- Value arg0 = isPositive ? arg : builder.create<math::AbsOp>(arg);
+ Value arg0 = isPositive ? arg : builder.create<math::AbsFOp>(arg);
Value biasedExponentBits = builder.create<arith::ShRUIOp>(
builder.create<arith::BitcastOp>(i32Vec, arg0),
bcast(i32Cst(builder, 23)));
@@ -375,7 +375,7 @@ AtanApproximation::matchAndRewrite(math::AtanOp op,
// Remap the problem over [0.0, 1.0] by looking at the absolute value and the
// handling symmetry.
- Value abs = builder.create<math::AbsOp>(operand);
+ Value abs = builder.create<math::AbsFOp>(operand);
Value reciprocal = builder.create<arith::DivFOp>(one, abs);
Value compare =
builder.create<arith::CmpFOp>(arith::CmpFPredicate::OLT, abs, reciprocal);
@@ -507,7 +507,7 @@ TanhApproximation::matchAndRewrite(math::TanhOp op,
// Mask for tiny values that are approximated with `operand`.
Value tiny = bcast(f32Cst(builder, 0.0004f));
Value tinyMask = builder.create<arith::CmpFOp>(
- arith::CmpFPredicate::OLT, builder.create<math::AbsOp>(op.getOperand()),
+ arith::CmpFPredicate::OLT, builder.create<math::AbsFOp>(op.getOperand()),
tiny);
// The monomial coefficients of the numerator polynomial (odd).
diff --git a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
index 8625708793d15..b55388bb37698 100644
--- a/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
+++ b/mlir/lib/Dialect/SparseTensor/Utils/Merger.cpp
@@ -892,7 +892,7 @@ Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
auto x = buildTensorExp(op, def->getOperand(0));
if (x.has_value()) {
unsigned e = x.value();
- if (isa<math::AbsOp>(def))
+ if (isa<math::AbsFOp>(def))
return addExp(kAbsF, e);
if (isa<complex::AbsOp>(def))
return addExp(kAbsC, e);
@@ -1073,7 +1073,7 @@ Value Merger::buildExp(RewriterBase &rewriter, Location loc, unsigned e,
llvm_unreachable("unexpected non-op");
// Unary operations.
case kAbsF:
- return rewriter.create<math::AbsOp>(loc, v0);
+ return rewriter.create<math::AbsFOp>(loc, v0);
case kAbsC: {
auto type = v0.getType().cast<ComplexType>();
auto eltType = type.getElementType().cast<FloatType>();
diff --git a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
index 9aff4ecc80e4b..cac758a89b61d 100644
--- a/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
+++ b/mlir/test/Conversion/ComplexToStandard/convert-to-standard.mlir
@@ -99,9 +99,9 @@ func.func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// Case 1. Zero denominator, numerator contains at most one NaN value.
// CHECK: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[RHS_REAL_ABS:.*]] = math.abs %[[RHS_REAL]] : f32
+// CHECK: %[[RHS_REAL_ABS:.*]] = math.absf %[[RHS_REAL]] : f32
// CHECK: %[[RHS_REAL_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_REAL_ABS]], %[[ZERO]] : f32
-// CHECK: %[[RHS_IMAG_ABS:.*]] = math.abs %[[RHS_IMAG]] : f32
+// CHECK: %[[RHS_IMAG_ABS:.*]] = math.absf %[[RHS_IMAG]] : f32
// CHECK: %[[RHS_IMAG_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_IMAG_ABS]], %[[ZERO]] : f32
// CHECK: %[[LHS_REAL_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_REAL]], %[[ZERO]] : f32
// CHECK: %[[LHS_IMAG_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_IMAG]], %[[ZERO]] : f32
@@ -117,9 +117,9 @@ func.func @complex_div(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
// CHECK: %[[RHS_REAL_FINITE:.*]] = arith.cmpf one, %[[RHS_REAL_ABS]], %[[INF]] : f32
// CHECK: %[[RHS_IMAG_FINITE:.*]] = arith.cmpf one, %[[RHS_IMAG_ABS]], %[[INF]] : f32
// CHECK: %[[RHS_IS_FINITE:.*]] = arith.andi %[[RHS_REAL_FINITE]], %[[RHS_IMAG_FINITE]] : i1
-// CHECK: %[[LHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL]] : f32
+// CHECK: %[[LHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL]] : f32
// CHECK: %[[LHS_REAL_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_REAL_ABS]], %[[INF]] : f32
-// CHECK: %[[LHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG]] : f32
+// CHECK: %[[LHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG]] : f32
// CHECK: %[[LHS_IMAG_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_IMAG_ABS]], %[[INF]] : f32
// CHECK: %[[LHS_IS_INFINITE:.*]] = arith.ori %[[LHS_REAL_INFINITE]], %[[LHS_IMAG_INFINITE]] : i1
// CHECK: %[[INF_NUM_FINITE_DENOM:.*]] = arith.andi %[[LHS_IS_INFINITE]], %[[RHS_IS_FINITE]] : i1
@@ -289,24 +289,24 @@ func.func @complex_mul(%lhs: complex<f32>, %rhs: complex<f32>) -> complex<f32> {
return %mul : complex<f32>
}
// CHECK: %[[LHS_REAL:.*]] = complex.re %[[LHS]] : complex<f32>
-// CHECK: %[[LHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL]] : f32
+// CHECK: %[[LHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL]] : f32
// CHECK: %[[LHS_IMAG:.*]] = complex.im %[[LHS]] : complex<f32>
-// CHECK: %[[LHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG]] : f32
+// CHECK: %[[LHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG]] : f32
// CHECK: %[[RHS_REAL:.*]] = complex.re %[[RHS]] : complex<f32>
-// CHECK: %[[RHS_REAL_ABS:.*]] = math.abs %[[RHS_REAL]] : f32
+// CHECK: %[[RHS_REAL_ABS:.*]] = math.absf %[[RHS_REAL]] : f32
// CHECK: %[[RHS_IMAG:.*]] = complex.im %[[RHS]] : complex<f32>
-// CHECK: %[[RHS_IMAG_ABS:.*]] = math.abs %[[RHS_IMAG]] : f32
+// CHECK: %[[RHS_IMAG_ABS:.*]] = math.absf %[[RHS_IMAG]] : f32
// CHECK: %[[LHS_REAL_TIMES_RHS_REAL:.*]] = arith.mulf %[[LHS_REAL]], %[[RHS_REAL]] : f32
-// CHECK: %[[LHS_REAL_TIMES_RHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL_TIMES_RHS_REAL]] : f32
+// CHECK: %[[LHS_REAL_TIMES_RHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL_TIMES_RHS_REAL]] : f32
// CHECK: %[[LHS_IMAG_TIMES_RHS_IMAG:.*]] = arith.mulf %[[LHS_IMAG]], %[[RHS_IMAG]] : f32
-// CHECK: %[[LHS_IMAG_TIMES_RHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG_TIMES_RHS_IMAG]] : f32
+// CHECK: %[[LHS_IMAG_TIMES_RHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG_TIMES_RHS_IMAG]] : f32
// CHECK: %[[REAL:.*]] = arith.subf %[[LHS_REAL_TIMES_RHS_REAL]], %[[LHS_IMAG_TIMES_RHS_IMAG]] : f32
// CHECK: %[[LHS_IMAG_TIMES_RHS_REAL:.*]] = arith.mulf %[[LHS_IMAG]], %[[RHS_REAL]] : f32
-// CHECK: %[[LHS_IMAG_TIMES_RHS_REAL_ABS:.*]] = math.abs %[[LHS_IMAG_TIMES_RHS_REAL]] : f32
+// CHECK: %[[LHS_IMAG_TIMES_RHS_REAL_ABS:.*]] = math.absf %[[LHS_IMAG_TIMES_RHS_REAL]] : f32
// CHECK: %[[LHS_REAL_TIMES_RHS_IMAG:.*]] = arith.mulf %[[LHS_REAL]], %[[RHS_IMAG]] : f32
-// CHECK: %[[LHS_REAL_TIMES_RHS_IMAG_ABS:.*]] = math.abs %[[LHS_REAL_TIMES_RHS_IMAG]] : f32
+// CHECK: %[[LHS_REAL_TIMES_RHS_IMAG_ABS:.*]] = math.absf %[[LHS_REAL_TIMES_RHS_IMAG]] : f32
// CHECK: %[[IMAG:.*]] = arith.addf %[[LHS_IMAG_TIMES_RHS_REAL]], %[[LHS_REAL_TIMES_RHS_IMAG]] : f32
// Handle cases where the "naive" calculation results in NaN values.
@@ -561,9 +561,9 @@ func.func @complex_tan(%arg: complex<f32>) -> complex<f32> {
// Case 1. Zero denominator, numerator contains at most one NaN value.
// CHECK: %[[ZERO:.*]] = arith.constant 0.000000e+00 : f32
-// CHECK: %[[RHS_REAL_ABS:.*]] = math.abs %[[RHS_REAL]] : f32
+// CHECK: %[[RHS_REAL_ABS:.*]] = math.absf %[[RHS_REAL]] : f32
// CHECK: %[[RHS_REAL_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_REAL_ABS]], %[[ZERO]] : f32
-// CHECK: %[[RHS_IMAG_ABS:.*]] = math.abs %[[RHS_IMAG]] : f32
+// CHECK: %[[RHS_IMAG_ABS:.*]] = math.absf %[[RHS_IMAG]] : f32
// CHECK: %[[RHS_IMAG_ABS_IS_ZERO:.*]] = arith.cmpf oeq, %[[RHS_IMAG_ABS]], %[[ZERO]] : f32
// CHECK: %[[LHS_REAL_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_REAL]], %[[ZERO]] : f32
// CHECK: %[[LHS_IMAG_IS_NOT_NAN:.*]] = arith.cmpf ord, %[[LHS_IMAG]], %[[ZERO]] : f32
@@ -579,9 +579,9 @@ func.func @complex_tan(%arg: complex<f32>) -> complex<f32> {
// CHECK: %[[RHS_REAL_FINITE:.*]] = arith.cmpf one, %[[RHS_REAL_ABS]], %[[INF]] : f32
// CHECK: %[[RHS_IMAG_FINITE:.*]] = arith.cmpf one, %[[RHS_IMAG_ABS]], %[[INF]] : f32
// CHECK: %[[RHS_IS_FINITE:.*]] = arith.andi %[[RHS_REAL_FINITE]], %[[RHS_IMAG_FINITE]] : i1
-// CHECK: %[[LHS_REAL_ABS:.*]] = math.abs %[[LHS_REAL]] : f32
+// CHECK: %[[LHS_REAL_ABS:.*]] = math.absf %[[LHS_REAL]] : f32
// CHECK: %[[LHS_REAL_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_REAL_ABS]], %[[INF]] : f32
-// CHECK: %[[LHS_IMAG_ABS:.*]] = math.abs %[[LHS_IMAG]] : f32
+// CHECK: %[[LHS_IMAG_ABS:.*]] = math.absf %[[LHS_IMAG]] : f32
// CHECK: %[[LHS_IMAG_INFINITE:.*]] = arith.cmpf oeq, %[[LHS_IMAG_ABS]], %[[INF]] : f32
// CHECK: %[[LHS_IS_INFINITE:.*]] = arith.ori %[[LHS_REAL_INFINITE]], %[[LHS_IMAG_INFINITE]] : i1
// CHECK: %[[INF_NUM_FINITE_DENOM:.*]] = arith.andi %[[LHS_IS_INFINITE]], %[[RHS_IS_FINITE]] : i1
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 15f454bb348ce..32b5a75be6528 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -170,9 +170,9 @@ gpu.module @test_module {
// CHECK: llvm.func @__nv_fabs(f64) -> f64
// CHECK-LABEL: func @gpu_fabs
func.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
- %result32 = math.abs %arg_f32 : f32
+ %result32 = math.absf %arg_f32 : f32
// CHECK: llvm.call @__nv_fabsf(%{{.*}}) : (f32) -> f32
- %result64 = math.abs %arg_f64 : f64
+ %result64 = math.absf %arg_f64 : f64
// CHECK: llvm.call @__nv_fabs(%{{.*}}) : (f64) -> f64
func.return %result32, %result64 : f32, f64
}
@@ -487,4 +487,3 @@ gpu.module @test_module {
gpu.return
}
}
-
diff --git a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
index a50f5dd0314e6..d2cab517822ce 100644
--- a/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
+++ b/mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl.mlir
@@ -89,9 +89,9 @@ gpu.module @test_module {
// CHECK: llvm.func @__ocml_fabs_f64(f64) -> f64
// CHECK-LABEL: func @gpu_fabs
func.func @gpu_fabs(%arg_f32 : f32, %arg_f64 : f64) -> (f32, f64) {
- %result32 = math.abs %arg_f32 : f32
+ %result32 = math.absf %arg_f32 : f32
// CHECK: llvm.call @__ocml_fabs_f32(%{{.*}}) : (f32) -> f32
- %result64 = math.abs %arg_f64 : f64
+ %result64 = math.absf %arg_f64 : f64
// CHECK: llvm.call @__ocml_fabs_f64(%{{.*}}) : (f64) -> f64
func.return %result32, %result64 : f32, f64
}
diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
index b3890a5436a84..1302b47182ce1 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-gl-spirv.mlir
@@ -29,7 +29,7 @@ func.func @float32_unary_scalar(%arg0: f32) {
// CHECK: spv.GL.Sin %{{.*}}: f32
%8 = math.sin %arg0 : f32
// CHECK: spv.GL.FAbs %{{.*}}: f32
- %9 = math.abs %arg0 : f32
+ %9 = math.absf %arg0 : f32
// CHECK: spv.GL.Ceil %{{.*}}: f32
%10 = math.ceil %arg0 : f32
// CHECK: spv.GL.Floor %{{.*}}: f32
diff --git a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
index 6e9791c8b917d..f336e7333f95f 100644
--- a/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
+++ b/mlir/test/Conversion/MathToSPIRV/math-to-opencl-spirv.mlir
@@ -27,7 +27,7 @@ func.func @float32_unary_scalar(%arg0: f32) {
// CHECK: spv.CL.sin %{{.*}}: f32
%8 = math.sin %arg0 : f32
// CHECK: spv.CL.fabs %{{.*}}: f32
- %9 = math.abs %arg0 : f32
+ %9 = math.absf %arg0 : f32
// CHECK: spv.CL.ceil %{{.*}}: f32
%10 = math.ceil %arg0 : f32
// CHECK: spv.CL.floor %{{.*}}: f32
diff --git a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
index 193c49c4ee87b..cd405cdd03b04 100644
--- a/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
+++ b/mlir/test/Conversion/TosaToLinalg/tosa-to-linalg.mlir
@@ -7,7 +7,7 @@ func.func @test_abs(%arg0: tensor<f32>) -> tensor<f32> {
// CHECK: [[INIT:%.+]] = linalg.init_tensor [] : tensor<f32>
// CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = []} ins(%arg0 : tensor<f32>) outs([[INIT]] : tensor<f32>) {
// CHECK: ^bb0(%arg1: f32, %arg2: f32):
- // CHECK: [[ELEMENT:%.+]] = math.abs %arg1
+ // CHECK: [[ELEMENT:%.+]] = math.absf %arg1
// CHECK: linalg.yield [[ELEMENT]] : f32
// CHECK: } -> tensor<f32>
@@ -26,7 +26,7 @@ func.func @test_abs(%arg0: tensor<2xf32>) -> tensor<2xf32> {
// CHECK: [[INIT:%.+]] = linalg.init_tensor [2] : tensor<2xf32>
// CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel"]} ins(%arg0 : tensor<2xf32>) outs([[INIT]] : tensor<2xf32>) {
// CHECK: ^bb0(%arg1: f32, %arg2: f32):
- // CHECK: [[ELEMENT:%.+]] = math.abs %arg1
+ // CHECK: [[ELEMENT:%.+]] = math.absf %arg1
// CHECK: linalg.yield [[ELEMENT]] : f32
// CHECK: } -> tensor<2xf32>
%0 = "tosa.abs"(%arg0) : (tensor<2xf32>) -> tensor<2xf32>
@@ -44,7 +44,7 @@ func.func @test_abs(%arg0: tensor<2x3xf32>) -> tensor<2x3xf32> {
// CHECK: [[INIT:%.+]] = linalg.init_tensor [2, 3] : tensor<2x3xf32>
// CHECK: [[GENERIC:%.+]] = linalg.generic {indexing_maps = [#[[$MAP0]], #[[$MAP0]]], iterator_types = ["parallel", "parallel"]} ins(%arg0 : tensor<2x3xf32>) outs([[INIT]] : tensor<2x3xf32>) {
// CHECK: ^bb0(%arg1: f32, %arg2: f32):
- // CHECK: [[ELEMENT:%.+]] = math.abs %arg1
+ // CHECK: [[ELEMENT:%.+]] = math.absf %arg1
// CHECK: linalg.yield [[ELEMENT]] : f32
// CHECK: } -> tensor<2x3xf32>
%0 = "tosa.abs"(%arg0) : (tensor<2x3xf32>) -> tensor<2x3xf32>
@@ -61,7 +61,7 @@ func.func @test_abs(%arg0: tensor<?xf32>) -> tensor<?xf32> {
// CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C0]]
// CHECK: %[[INIT:.+]] = linalg.init_tensor [%[[DIM]]]
// CHECK: linalg.generic
- // CHECK: math.abs
+ // CHECK: math.absf
%0 = "tosa.abs"(%arg0) : (tensor<?xf32>) -> tensor<?xf32>
return %0 : tensor<?xf32>
}
@@ -76,7 +76,7 @@ func.func @test_abs_dyn(%arg0: tensor<2x?xf32>) -> tensor<2x?xf32> {
// CHECK: %[[DIM:.+]] = tensor.dim %arg0, %[[C1]]
// CHECK: %[[INIT:.+]] = linalg.init_tensor [2, %[[DIM]]]
// CHECK: linalg.generic
- // CHECK: math.abs
+ // CHECK: math.absf
%0 = "tosa.abs"(%arg0) : (tensor<2x?xf32>) -> tensor<2x?xf32>
return %0 : tensor<2x?xf32>
}
@@ -146,7 +146,7 @@ func.func @test_simple_f32(%arg0: tensor<1xf32>) -> () {
%0 = "tosa.tanh"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: linalg.generic
- // CHECK: math.abs
+ // CHECK: math.absf
%1 = "tosa.abs"(%arg0) : (tensor<1xf32>) -> tensor<1xf32>
// CHECK: linalg.generic
@@ -1252,7 +1252,7 @@ func.func @pad_float(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
// CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
// CHECK-DAG: [[CST:%.+]] = arith.constant 0.000000e+00 : f32
// CHECK: tensor.pad %arg0 low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
- // CHECK: ^bb0(%arg1: index, %arg2: index):
+ // CHECK: ^bb0(%arg1: index, %arg2: index):
// CHECK: tensor.yield [[CST]]
// CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
%1 = "tosa.pad"(%arg0, %0) : (tensor<1x2xf32>, tensor<2x2xi32>) -> (tensor<4x9xf32>)
@@ -1288,7 +1288,7 @@ func.func @pad_float_explicit(%arg0 : tensor<1x2xf32>) -> (tensor<4x9xf32>) {
// CHECK-DAG: [[INDEX4:%.+]] = arith.constant 4 : index
// CHECK-DAG: [[CST:%.+]] = arith.constant 4.200000e+01 : f32
// CHECK: tensor.pad %arg0 low{{\[}}%{{.*}}, [[INDEX3]]] high{{\[}}[[INDEX2]], [[INDEX4]]] {
- // CHECK: ^bb0(%arg1: index, %arg2: index):
+ // CHECK: ^bb0(%arg1: index, %arg2: index):
// CHECK: tensor.yield [[CST]]
// CHECK: } : tensor<1x2xf32> to tensor<4x9xf32>
%1 = arith.constant dense<42.0> : tensor<f32>
diff --git a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
index a769684b57bb5..fbba581e3df8e 100644
--- a/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
+++ b/mlir/test/Dialect/Linalg/generalize-named-polymorphic-ops.mlir
@@ -306,7 +306,7 @@ func.func @generalize_elemwise_abs(%lhs : tensor<4x8xf32>, %output : tensor<4x8x
}
// CHECK-LABEL: @generalize_elemwise_abs
-// CHECK: = math.abs
+// CHECK: = math.absf
// -----
diff --git a/mlir/test/Dialect/Math/canonicalize.mlir b/mlir/test/Dialect/Math/canonicalize.mlir
index 5028c2844a06a..5fde49ed96db9 100644
--- a/mlir/test/Dialect/Math/canonicalize.mlir
+++ b/mlir/test/Dialect/Math/canonicalize.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -canonicalize | FileCheck %s
+// RUN: mlir-opt %s -canonicalize | FileCheck %s
// CHECK-LABEL: @ceil_fold
// CHECK: %[[cst:.+]] = arith.constant 1.000000e+00 : f32
@@ -125,7 +125,7 @@ func.func @sqrt_fold_vec() -> (vector<4xf32>) {
// CHECK: return %[[cst]]
func.func @abs_fold() -> f32 {
%c = arith.constant -4.0 : f32
- %r = math.abs %c : f32
+ %r = math.absf %c : f32
return %r : f32
}
diff --git a/mlir/test/Dialect/Math/polynomial-approximation.mlir b/mlir/test/Dialect/Math/polynomial-approximation.mlir
index 1ba82696744dd..33ac11b29b9c6 100644
--- a/mlir/test/Dialect/Math/polynomial-approximation.mlir
+++ b/mlir/test/Dialect/Math/polynomial-approximation.mlir
@@ -359,7 +359,7 @@ func.func @log1p_vector(%arg0: vector<8xf32>) -> vector<8xf32> {
// CHECK: %[[VAL_16:.*]] = arith.select %[[VAL_15]], %[[VAL_0]], %[[VAL_2]] : f32
// CHECK: %[[VAL_17:.*]] = arith.cmpf ugt, %[[VAL_16]], %[[VAL_1]] : f32
// CHECK: %[[VAL_18:.*]] = arith.select %[[VAL_17]], %[[VAL_16]], %[[VAL_1]] : f32
-// CHECK: %[[VAL_19:.*]] = math.abs %[[VAL_0]] : f32
+// CHECK: %[[VAL_19:.*]] = math.absf %[[VAL_0]] : f32
// CHECK: %[[VAL_20:.*]] = arith.cmpf olt, %[[VAL_19]], %[[VAL_3]] : f32
// CHECK: %[[VAL_21:.*]] = arith.mulf %[[VAL_18]], %[[VAL_18]] : f32
// CHECK: %[[VAL_22:.*]] = math.fma %[[VAL_21]], %[[VAL_10]], %[[VAL_9]] : f32
@@ -517,7 +517,7 @@ func.func @rsqrt_vector_2x16xf32(%arg0: vector<2x16xf32>) -> vector<2x16xf32> {
// CHECK-DAG: %[[N3:.+]] = arith.constant -0.0106783099
// CHECK-DAG: %[[N4:.+]] = arith.constant 1.00209987
// CHECK-DAG: %[[HALF_PI:.+]] = arith.constant 1.57079637
-// CHECK-DAG: %[[ABS:.+]] = math.abs %arg0
+// CHECK-DAG: %[[ABS:.+]] = math.absf %arg0
// CHECK-DAG: %[[DIV:.+]] = arith.divf %cst, %[[ABS]]
// CHECK-DAG: %[[CMP:.+]] = arith.cmpf olt, %[[ABS]], %[[DIV]]
// CHECK-DAG: %[[SEL:.+]] = arith.select %[[CMP]], %[[ABS]], %[[DIV]]
@@ -547,7 +547,7 @@ func.func @atan_scalar(%arg0: f32) -> f32 {
// CHECK-DAG: %[[ARG0:.+]] = arith.extf %arg0 : f16 to f32
// CHECK-DAG: %[[ARG1:.+]] = arith.extf %arg1 : f16 to f32
// CHECK-DAG: %[[RATIO:.+]] = arith.divf %[[ARG0]], %[[ARG1]]
-// CHECK-DAG: %[[ABS:.+]] = math.abs %[[RATIO]]
+// CHECK-DAG: %[[ABS:.+]] = math.absf %[[RATIO]]
// CHECK-DAG: %[[DIV:.+]] = arith.divf %cst, %[[ABS]]
// CHECK-DAG: %[[CMP:.+]] = arith.cmpf olt, %[[ABS]], %[[DIV]]
// CHECK-DAG: %[[SEL:.+]] = arith.select %[[CMP]], %[[ABS]], %[[DIV]]
@@ -593,4 +593,3 @@ func.func @atan2_scalar(%arg0: f16, %arg1: f16) -> f16 {
%0 = math.atan2 %arg0, %arg1 : f16
return %0 : f16
}
-
diff --git a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
index 8e78f5dafea4e..2b9235d588a18 100644
--- a/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
+++ b/mlir/test/Dialect/SparseTensor/sparse_fp_ops.mlir
@@ -44,7 +44,7 @@
// CHECK: scf.for %[[VAL_10:.*]] = %[[VAL_8]] to %[[VAL_9]] step %[[VAL_3]] {
// CHECK: %[[VAL_11:.*]] = memref.load %[[VAL_5]]{{\[}}%[[VAL_10]]] : memref<?xindex>
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_10]]] : memref<?xf64>
-// CHECK: %[[VAL_13:.*]] = math.abs %[[VAL_12]] : f64
+// CHECK: %[[VAL_13:.*]] = math.absf %[[VAL_12]] : f64
// CHECK: memref.store %[[VAL_13]], %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<32xf64>
// CHECK: }
// CHECK: %[[VAL_14:.*]] = bufferization.to_tensor %[[VAL_7]] : memref<32xf64>
@@ -56,7 +56,7 @@ func.func @abs(%arga: tensor<32xf64, #SV>,
ins(%arga: tensor<32xf64, #SV>)
outs(%argx: tensor<32xf64>) {
^bb(%a: f64, %x: f64):
- %0 = math.abs %a : f64
+ %0 = math.absf %a : f64
linalg.yield %0 : f64
} -> tensor<32xf64>
return %0 : tensor<32xf64>
@@ -366,7 +366,7 @@ func.func @divbyc(%arga: tensor<32xf64, #SV>,
// CHECK: %[[VAL_12:.*]] = memref.load %[[VAL_6]]{{\[}}%[[VAL_11]]] : memref<?xindex>
// CHECK: memref.store %[[VAL_12]], %[[VAL_8]]{{\[}}%[[VAL_1]]] : memref<?xindex>
// CHECK: %[[VAL_13:.*]] = memref.load %[[VAL_7]]{{\[}}%[[VAL_11]]] : memref<?xf64>
-// CHECK: %[[VAL_14:.*]] = math.abs %[[VAL_13]] : f64
+// CHECK: %[[VAL_14:.*]] = math.absf %[[VAL_13]] : f64
// CHECK: %[[VAL_15:.*]] = math.ceil %[[VAL_14]] : f64
// CHECK: %[[VAL_16:.*]] = math.floor %[[VAL_15]] : f64
// CHECK: %[[VAL_17:.*]] = math.sqrt %[[VAL_16]] : f64
@@ -387,7 +387,7 @@ func.func @zero_preserving_math(%arga: tensor<32xf64, #SV>) -> tensor<32xf64, #S
ins(%arga: tensor<32xf64, #SV>)
outs(%xinp: tensor<32xf64, #SV>) {
^bb(%a: f64, %x: f64):
- %0 = math.abs %a : f64
+ %0 = math.absf %a : f64
%1 = math.ceil %0 : f64
%2 = math.floor %1 : f64
%3 = math.sqrt %2 : f64
diff --git a/mlir/test/IR/core-ops.mlir b/mlir/test/IR/core-ops.mlir
index 729f0c8ca6e18..6ede116bfc2e0 100644
--- a/mlir/test/IR/core-ops.mlir
+++ b/mlir/test/IR/core-ops.mlir
@@ -105,17 +105,17 @@ func.func @standard_instrs(tensor<4x4x?xf32>, f32, i32, index, i64, f16) {
// CHECK: arith.constant false
%75 = arith.constant false
- // CHECK: %{{.*}} = math.abs %arg1 : f32
- %100 = "math.abs"(%f) : (f32) -> f32
+ // CHECK: %{{.*}} = math.absf %arg1 : f32
+ %100 = "math.absf"(%f) : (f32) -> f32
- // CHECK: %{{.*}} = math.abs %arg1 : f32
- %101 = math.abs %f : f32
+ // CHECK: %{{.*}} = math.absf %arg1 : f32
+ %101 = math.absf %f : f32
- // CHECK: %{{.*}} = math.abs %{{.*}}: vector<4xf32>
- %102 = math.abs %vcf32 : vector<4xf32>
+ // CHECK: %{{.*}} = math.absf %{{.*}}: vector<4xf32>
+ %102 = math.absf %vcf32 : vector<4xf32>
- // CHECK: %{{.*}} = math.abs %arg0 : tensor<4x4x?xf32>
- %103 = math.abs %t : tensor<4x4x?xf32>
+ // CHECK: %{{.*}} = math.absf %arg0 : tensor<4x4x?xf32>
+ %103 = math.absf %t : tensor<4x4x?xf32>
// CHECK: %{{.*}} = math.ceil %arg1 : f32
%104 = "math.ceil"(%f) : (f32) -> f32
diff --git a/mlir/test/Target/Cpp/invalid.mlir b/mlir/test/Target/Cpp/invalid.mlir
index d38fed86f9ac4..18dabb9155866 100644
--- a/mlir/test/Target/Cpp/invalid.mlir
+++ b/mlir/test/Target/Cpp/invalid.mlir
@@ -11,8 +11,8 @@ func.func @multiple_blocks() {
// -----
func.func @unsupported_std_op(%arg0: f64) -> f64 {
- // expected-error @+1 {{'math.abs' op unable to find printer for op}}
- %0 = math.abs %arg0 : f64
+ // expected-error @+1 {{'math.absf' op unable to find printer for op}}
+ %0 = math.absf %arg0 : f64
return %0 : f64
}
diff --git a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
index ddb5cc8248024..aad714998c108 100644
--- a/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
+++ b/mlir/test/python/dialects/linalg/opdsl/emit_misc.py
@@ -92,7 +92,7 @@ def test_f32_elemwise_log(input, init_result):
# CHECK-LABEL: @test_f32_elemwise_abs
# CHECK: ^{{.*}}(%[[IN:.+]]: f32, %[[OUT:.+]]: f32)
- # CHECK-NEXT: %[[EXP:.+]] = math.abs %[[IN]] : f32
+ # CHECK-NEXT: %[[EXP:.+]] = math.absf %[[IN]] : f32
# CHECK-NEXT: linalg.yield %[[EXP]] : f32
# CHECK-NEXT: -> tensor<4x16xf32>
@func.FuncOp.from_py_func(