[Mlir-commits] [mlir] Add host-supports-nvptx requirement to lit tests (PR #66102)

llvmlistbot at llvm.org
Tue Sep 12 08:37:02 PDT 2023


llvmbot wrote:

@llvm/pr-subscribers-mlir

Changes: None
--
Full diff: https://github.com/llvm/llvm-project/pull/66102.diff

23 Files Affected:

- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/dump-ptx.mlir (+2) 
- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-mma-2-4-f16.mlir (+2) 
- (modified) mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-reduction-distribute.mlir (+2) 
- (modified) mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-warp-distribute.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/async.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir (+3-1) 
- (modified) mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/printf.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/shuffle.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir (+2) 
- (modified) mlir/test/Integration/GPU/CUDA/two-modules.mlir (+2) 
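
For context: REQUIRES is a lit directive, so each annotated test now runs only when "host-supports-nvptx" appears in config.available_features, and is otherwise reported as Unsupported rather than Failed. The diff excerpt below does not show where that feature itself gets defined; the following is a minimal sketch of one way to register it in mlir/test/lit.cfg.py, assuming the host llc binary can be probed for the NVPTX backend (the helper name, the llc probe, and the use of config.llvm_tools_dir are illustrative assumptions, not necessarily what this patch does):

<pre>
# Sketch only: lit.cfg.py executes with a `config` object injected by lit,
# so `config` needs no import here.
import os
import subprocess


def host_supports_nvptx(llc):
    """Return True if the host LLVM build registers the NVPTX target."""
    try:
        out = subprocess.run(
            [llc, "--version"], capture_output=True, text=True, check=True
        ).stdout
    except (OSError, subprocess.CalledProcessError):
        return False
    # `llc --version` lists the registered targets, e.g.
    # "nvptx64 - NVIDIA PTX 64-bit".
    return "nvptx" in out


if host_supports_nvptx(os.path.join(config.llvm_tools_dir, "llc")):
    config.available_features.add("host-supports-nvptx")
</pre>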


<pre>
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/dump-ptx.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/dump-ptx.mlir
index 0cb06b7bf1d2001..5b4bdbe31dab334 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/dump-ptx.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/dump-ptx.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm -debug-only=serialize-to-isa \
 // RUN: 2>&1 | FileCheck %s
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-mma-2-4-f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-mma-2-4-f16.mlir
index 80972f244ec02d7..aee8a6a6558e4f5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-mma-2-4-f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-mma-2-4-f16.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 //
 // NOTE: this test requires gpu-sm80
 //
diff --git a/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-reduction-distribute.mlir b/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-reduction-distribute.mlir
index 8c991493a2b0174..bc5737427a15160 100644
--- a/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-reduction-distribute.mlir
+++ b/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-reduction-distribute.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s -test-vector-warp-distribute="hoist-uniform distribute-transfer-write propagate-distribution" -canonicalize |\
 // RUN: mlir-opt -test-vector-warp-distribute=rewrite-warp-ops-to-scf-if |\
 // RUN: mlir-opt -lower-affine -convert-vector-to-scf -convert-scf-to-cf -convert-vector-to-llvm \
diff --git a/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-warp-distribute.mlir b/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-warp-distribute.mlir
index f26c18c4ae3dd28..0efac62bbd7afee 100644
--- a/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-warp-distribute.mlir
+++ b/mlir/test/Integration/Dialect/Vector/GPU/CUDA/test-warp-distribute.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // Run the test cases without distributing ops to test default lowering. Run
 // everything on the same thread.
 // RUN: mlir-opt %s -test-vector-warp-distribute=rewrite-warp-ops-to-scf-if -canonicalize | \
diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
index 56d1e6d2973562b..d959fdb6a9db178 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f16-f16-accum.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN:  -test-transform-dialect-interpreter \
 // RUN:  -test-transform-dialect-erase-schedule \
diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
index 357ab8ec4d75921..0ec15f2a9c79d70 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/sm80/transform-mma-sync-matmul-f32.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+//
 // RUN: mlir-opt %s \
 // RUN:   -test-transform-dialect-interpreter \
 // RUN: | FileCheck %s --check-prefix=CHECK-MMA-SYNC
diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
index 591bf1b4fd18231..4d8a281113593c6 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f16.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm="cubin-chip=sm_70" \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir
index 51bd23f817b33f1..664d344b2769bf7 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32-bare-ptr.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // Tests memref bare pointer lowering convention both host side and kernel-side;
 // this works for only statically shaped memrefs.
 // Similar to the wmma-matmul-f32 but with the memref bare pointer lowering convention.
diff --git a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
index 0307b3d504be9f6..4d76eb898dc2935 100644
--- a/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
+++ b/mlir/test/Integration/GPU/CUDA/TensorCore/wmma-matmul-f32.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm="cubin-chip=sm_70" \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir
index b131b8682ddee06..c48a515ed022135 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-and.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir
index 155423db7e05049..e8ffc3f830c7c91 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-max.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir
index e5047b6efa3bf25..fde50e9b6b92fbd 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-min.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir
index 163e9fdba60c1a9..08c3571ef1c35fa 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-op.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir
index 381db2639c371f3..134296f39c2b49e 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-or.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir
index 23c6c117e67f36b..c2be1b65950ea51 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-region.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir b/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir
index 3c5a100b5b90d57..6b75321b7bfc235 100644
--- a/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir
+++ b/mlir/test/Integration/GPU/CUDA/all-reduce-xor.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/async.mlir b/mlir/test/Integration/GPU/CUDA/async.mlir
index d2a5127a34c3bdd..1314d32a779a883 100644
--- a/mlir/test/Integration/GPU/CUDA/async.mlir
+++ b/mlir/test/Integration/GPU/CUDA/async.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -gpu-kernel-outlining \
 // RUN: | mlir-opt -pass-pipeline='builtin.module(gpu.module(strip-debuginfo,convert-gpu-to-nvvm),nvvm-attach-target)' \
diff --git a/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir b/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir
index a5d04f7322b4914..f6ae33b1f373708 100644
--- a/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir
+++ b/mlir/test/Integration/GPU/CUDA/gpu-to-cubin.mlir
@@ -1,5 +1,7 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
-// RUN: | mlir-opt -test-lower-to-nvvm \
+// RUN: | mlir-opt -test-lower-to-nvvm="kernel-index-bitwidth=32 cubin-chip=sm_80 cubin-features=+ptx76" \
 // RUN: | mlir-cpu-runner \
 // RUN:   --shared-libs=%mlir_cuda_runtime \
 // RUN:   --shared-libs=%mlir_runner_utils \
diff --git a/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir b/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir
index 7657bf4732d32b7..3389f805ac63d0f 100644
--- a/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir
+++ b/mlir/test/Integration/GPU/CUDA/multiple-all-reduce.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/printf.mlir b/mlir/test/Integration/GPU/CUDA/printf.mlir
index 1a35d1e78b09475..eef5ac66ca52ad4 100644
--- a/mlir/test/Integration/GPU/CUDA/printf.mlir
+++ b/mlir/test/Integration/GPU/CUDA/printf.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/shuffle.mlir b/mlir/test/Integration/GPU/CUDA/shuffle.mlir
index 40fcea857d5b4eb..05cb854d18dd4f3 100644
--- a/mlir/test/Integration/GPU/CUDA/shuffle.mlir
+++ b/mlir/test/Integration/GPU/CUDA/shuffle.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir b/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
index 882c63a866eb4f3..e66978bc594b1b8 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/transform-dialect/tma_load_64x8_8x128_noswizzle-transform.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN:     -test-transform-dialect-interpreter \
 // RUN:     -test-transform-dialect-erase-schedule \
diff --git a/mlir/test/Integration/GPU/CUDA/two-modules.mlir b/mlir/test/Integration/GPU/CUDA/two-modules.mlir
index 5a9acdf3d8da6ba..fde66de2fce6e7e 100644
--- a/mlir/test/Integration/GPU/CUDA/two-modules.mlir
+++ b/mlir/test/Integration/GPU/CUDA/two-modules.mlir
@@ -1,3 +1,5 @@
+// REQUIRES: host-supports-nvptx
+
 // RUN: mlir-opt %s \
 // RUN: | mlir-opt -test-lower-to-nvvm \
 // RUN: | mlir-cpu-runner \
</pre>
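
With these directives in place, builds configured without the NVPTX target should report the tests above as Unsupported; invoking llvm-lit with --show-unsupported lists them explicitly. Note that gpu-to-cubin.mlir is the only test whose RUN pipeline also changes: its -test-lower-to-nvvm invocation now pins kernel-index-bitwidth=32, cubin-chip=sm_80, and cubin-features=+ptx76.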


https://github.com/llvm/llvm-project/pull/66102

