[Mlir-commits] [mlir] [MLIR][MathToXeVM] Remove requirement for ModuleOp op type for MathToXeVM (PR #163619)
Ian Li
llvmlistbot at llvm.org
Wed Oct 15 13:17:06 PDT 2025
https://github.com/ianayl updated https://github.com/llvm/llvm-project/pull/163619
>From 24fbbcb80a40556dd30a14da7352ee117f79040c Mon Sep 17 00:00:00 2001
From: Ian Li <ian.li at intel.com>
Date: Wed, 15 Oct 2025 12:31:26 -0700
Subject: [PATCH 1/2] remove hard dependency on modules for mathtoxevm
---
mlir/include/mlir/Conversion/Passes.td | 2 +-
.../MathToXeVM/check-module-behavior.mlir | 42 +++++++++++++++++++
2 files changed, 43 insertions(+), 1 deletion(-)
create mode 100644 mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir
diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index 25e9d34f3e653..20db6c7d3f410 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -800,7 +800,7 @@ def ConvertMathToSPIRVPass : Pass<"convert-math-to-spirv"> {
// MathToXeVM
//===----------------------------------------------------------------------===//
-def ConvertMathToXeVM : Pass<"convert-math-to-xevm", "ModuleOp"> {
+def ConvertMathToXeVM : Pass<"convert-math-to-xevm"> {
let summary =
"Convert (fast) math operations to native XeVM/SPIRV equivalents";
let description = [{
diff --git a/mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir b/mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir
new file mode 100644
index 0000000000000..4757153c69094
--- /dev/null
+++ b/mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir
@@ -0,0 +1,42 @@
+// RUN: mlir-opt --pass-pipeline="builtin.module(convert-math-to-xevm)" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK,CHECK-ENTIRE-MODULE'
+// RUN: mlir-opt --pass-pipeline="builtin.module(gpu.module(convert-math-to-xevm))" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK,CHECK-ONLY-GPU'
+//
+// Check that MathToXeVM handles nested modules while respecting pass manager.
+
+// CHECK-LABEL: @test_module
+module @test_module {
+ // CHECK-ENTIRE-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ // CHECK-ONLY-GPU-NOT: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+
+ // CHECK-LABEL: @test_gpu
+ gpu.module @test_gpu {
+ // CHECK: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ gpu.func @exp_gpu() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ gpu.return
+ }
+ }
+
+ // CHECK-LABEL: @exp_func
+ func.func @exp_func() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK-ENTIRE-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ // CHECK-ONLY-GPU: math.exp
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ return
+ }
+}
>From d709641969398d74c6ad5d19d4ff4bab325976f7 Mon Sep 17 00:00:00 2001
From: Ian Li <ian.li at intel.com>
Date: Wed, 15 Oct 2025 13:16:55 -0700
Subject: [PATCH 2/2] move check-module-behavior.mlir to math-to-xevm.mlir
---
.../MathToXeVM/check-module-behavior.mlir | 42 ------------------
.../Conversion/MathToXeVM/math-to-xevm.mlir | 44 +++++++++++++++++++
2 files changed, 44 insertions(+), 42 deletions(-)
delete mode 100644 mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir
diff --git a/mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir b/mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir
deleted file mode 100644
index 4757153c69094..0000000000000
--- a/mlir/test/Conversion/MathToXeVM/check-module-behavior.mlir
+++ /dev/null
@@ -1,42 +0,0 @@
-// RUN: mlir-opt --pass-pipeline="builtin.module(convert-math-to-xevm)" %s \
-// RUN: | FileCheck %s -check-prefixes='CHECK,CHECK-ENTIRE-MODULE'
-// RUN: mlir-opt --pass-pipeline="builtin.module(gpu.module(convert-math-to-xevm))" %s \
-// RUN: | FileCheck %s -check-prefixes='CHECK,CHECK-ONLY-GPU'
-//
-// Check that MathToXeVM handles nested modules while respecting pass manager.
-
-// CHECK-LABEL: @test_module
-module @test_module {
- // CHECK-ENTIRE-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
- // CHECK-ONLY-GPU-NOT: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
-
- // CHECK-LABEL: @test_gpu
- gpu.module @test_gpu {
- // CHECK: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
- gpu.func @exp_gpu() {
- %c1_f32 = arith.constant 1. : f32
-
- // CHECK: math.exp
- %exp_normal_f32 = math.exp %c1_f32 : f32
-
- // CHECK: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
- %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
-
- gpu.return
- }
- }
-
- // CHECK-LABEL: @exp_func
- func.func @exp_func() {
- %c1_f32 = arith.constant 1. : f32
-
- // CHECK: math.exp
- %exp_normal_f32 = math.exp %c1_f32 : f32
-
- // CHECK-ENTIRE-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
- // CHECK-ONLY-GPU: math.exp
- %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
-
- return
- }
-}
diff --git a/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir b/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
index d76627bb4201c..c61640c2afc4f 100644
--- a/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
+++ b/mlir/test/Conversion/MathToXeVM/math-to-xevm.mlir
@@ -3,6 +3,15 @@
// RUN: mlir-opt %s -convert-math-to-xevm='convert-arith=false' \
// RUN: | FileCheck %s -check-prefixes='CHECK,CHECK-NO-ARITH'
+// RUN: mlir-opt --pass-pipeline="builtin.module(convert-math-to-xevm)" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK-MODULE,CHECK-ENTIRE-MODULE'
+// RUN: mlir-opt --pass-pipeline="builtin.module(gpu.module(convert-math-to-xevm))" %s \
+// RUN: | FileCheck %s -check-prefixes='CHECK-MODULE,CHECK-ONLY-GPU'
+
+// This test:
+// - checks that MathToXeVM converts fastmath math/arith ops properly;
+// - checks that MathToXeVM handles nested modules while respecting the pass manager.
+
module @test_module {
// CHECK-DAG: llvm.func @_Z22__spirv_ocl_native_expDh(f16) -> f16
// CHECK-DAG: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
@@ -152,4 +161,39 @@ module @test_module {
return
}
+
+  // Check that MathToXeVM handles nested modules while respecting the pass manager:
+
+ // CHECK-ENTIRE-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ // CHECK-ONLY-GPU-NOT: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+
+ // CHECK-MODULE-LABEL: @test_gpu
+ gpu.module @test_gpu {
+ // CHECK-MODULE: llvm.func @_Z22__spirv_ocl_native_expf(f32) -> f32
+ gpu.func @exp_gpu() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK-MODULE: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ gpu.return
+ }
+ }
+
+ // CHECK-MODULE-LABEL: @exp_func
+ func.func @exp_func() {
+ %c1_f32 = arith.constant 1. : f32
+
+ // CHECK-MODULE: math.exp
+ %exp_normal_f32 = math.exp %c1_f32 : f32
+
+ // CHECK-ENTIRE-MODULE: llvm.call @_Z22__spirv_ocl_native_expf(%{{.*}}) {fastmathFlags = #llvm.fastmath<afn>} : (f32) -> f32
+ // CHECK-ONLY-GPU: math.exp
+ %exp_fast_f32 = math.exp %c1_f32 fastmath<afn> : f32
+
+ return
+ }
}
More information about the Mlir-commits
mailing list