[Mlir-commits] [mlir] [MLIR][LLVM] Remove typed pointer remnants from integration tests (PR #71208)
llvmlistbot at llvm.org
Fri Nov 3 10:41:15 PDT 2023
llvmbot wrote:
@llvm/pr-subscribers-mlir-sme
@llvm/pr-subscribers-mlir-vector
Author: Christian Ulmann (Dinistro)
Changes:
This commit removes all LLVM dialect typed pointers from the integration tests. Typed pointers have been deprecated for a while and will soon be removed from the LLVM dialect entirely.
Related PSA: https://discourse.llvm.org/t/psa-removal-of-typed-pointers-from-the-llvm-dialect/74502
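For reference, the change in most of these tests is a single type-alias update: the pointee type is simply dropped from the pointer type. A minimal before/after sketch of the pattern repeated throughout the diff:

```mlir
// Before: typed pointer, with the pointee type spelled in the type.
!Filename = !llvm.ptr<i8>

// After: opaque pointer, no pointee type.
!Filename = !llvm.ptr
```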
---
Full diff: https://github.com/llvm/llvm-project/pull/71208.diff
21 Files Affected:
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir (+1-1)
- (modified) mlir/test/Integration/Dialect/SparseTensor/python/test_output.py (+2-2)
- (modified) mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir (+4-4)
- (modified) mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir (+4-5)
- (modified) mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir (+1-1)
- (modified) mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir (+1-1)
``````````diff
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
index e1cdc9ed6ba3d41..b77c1b42baf7ec6 100755
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/block.mlir
@@ -25,7 +25,7 @@
// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
// R_UN: %{compile} | env %{env} %{run} | FileCheck %s
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#BSR = #sparse_tensor.encoding<{
map = (i, j) ->
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
index 4ef8b29ee4e1a84..f11d396dc6f8f7d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/dense_output.mlir
@@ -31,7 +31,7 @@
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#DenseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : dense, d1 : dense)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
index 773c34e1f3dabca..8c81e9df6a0e41d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_ds.mlir
@@ -25,7 +25,7 @@
// REDEFINE: %{sparse_compiler_opts} = enable-runtime-library=false
// R_UN: %{compile} | env %{env} %{run} | FileCheck %s
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#CSR = #sparse_tensor.encoding<{
map = (i, j) -> ( i : dense, j : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
index 60f2e22ab4a8200..837ea4038cac8b8 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_flatten.mlir
@@ -31,7 +31,7 @@
// Do the same run, but now with direct IR generation and VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseTensor = #sparse_tensor.encoding<{
// Note that any dimToLvl permutation should give the same results
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
index 19648b25fd7c15f..e2d5e2d976415b7 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_matvec.mlir
@@ -40,7 +40,7 @@
// vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : dense, d1 : compressed),
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
index 306b88149e736dc..ed4dc73a43226f5 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_mttkrp.mlir
@@ -32,7 +32,7 @@
// vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseTensor = #sparse_tensor.encoding<{
map = (d0, d1, d2) -> (d0 : compressed, d1 : compressed, d2 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
index b466cf242da52a6..911785030ba4294 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_out_simple.mlir
@@ -28,7 +28,7 @@
// vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#DCSR = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : compressed, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
index b1249c73806b16f..2b134d94a9dcf3a 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sampled_matmul.mlir
@@ -32,7 +32,7 @@
// vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : compressed, d1 : compressed),
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
index b789450b4f88bba..e7690adac534d89 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sorted_coo.mlir
@@ -32,7 +32,7 @@
// Do the same run, but now with VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SortedCOO = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : compressed(nonunique), d1 : singleton)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
index e8a9ea6e2c5a775..5459293383015ed 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_spmm.mlir
@@ -31,7 +31,7 @@
// Do the same run, but now with VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | env %{env} %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : dense, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
index 99b596f869ec09b..89383d7ec84eb6f 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum.mlir
@@ -34,7 +34,7 @@
// TODO: The test currently only operates on the triangular part of the
// symmetric matrix.
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : compressed, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
index cbedd2300b0eee3..d5e519efb916d1b 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_bf16.mlir
@@ -31,7 +31,7 @@
// UNSUPPORTED: target=aarch64{{.*}}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : compressed, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
index 13f4f221dff05e5..4a69125394c0b54 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_c32.mlir
@@ -34,7 +34,7 @@
// TODO: The test currently only operates on the triangular part of the
// symmetric matrix.
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : compressed, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
index dfb23d6afc64bce..f6f55b7ab2d7d4d 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/CPU/sparse_sum_f16.mlir
@@ -30,7 +30,7 @@
// Do the same run, but now with VLA vectorization.
// RUN: %if mlir_arm_sve_tests %{ %{compile_sve} | %{run_sve} | FileCheck %s %}
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#SparseMatrix = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : compressed, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
index ac5c0f8bead0773..e79696ac4c047ca 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sampled-matmul-lib.mlir
@@ -26,7 +26,7 @@
// RUNNOT: %{compile} enable-runtime-library=false gpu-data-transfer-strategy=zero-copy" | %{run}
//
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#CSR = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : dense, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir
index 54408d629ec22ec..c1062dd4ee3e938 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir
+++ b/mlir/test/Integration/Dialect/SparseTensor/GPU/CUDA/sparse-sddmm-lib.mlir
@@ -21,7 +21,7 @@
// R_UN: %{compile} enable-runtime-library=false" | %{run}
//
-!Filename = !llvm.ptr<i8>
+!Filename = !llvm.ptr
#CSR = #sparse_tensor.encoding<{
map = (d0, d1) -> (d0 : dense, d1 : compressed)
diff --git a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
index 5a8c92f7cd21fc5..c9efadb60480c54 100644
--- a/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
+++ b/mlir/test/Integration/Dialect/SparseTensor/python/test_output.py
@@ -19,11 +19,11 @@
def boilerplate(attr: st.EncodingAttr):
"""Returns boilerplate main method."""
return f"""
-func.func @main(%p : !llvm.ptr<i8>) -> () attributes {{ llvm.emit_c_interface }} {{
+func.func @main(%p : !llvm.ptr) -> () attributes {{ llvm.emit_c_interface }} {{
%d = arith.constant sparse<[[0, 0], [1, 1], [0, 9], [9, 0], [4, 4]],
[1.0, 2.0, 3.0, 4.0, 5.0]> : tensor<10x10xf64>
%a = sparse_tensor.convert %d : tensor<10x10xf64> to tensor<10x10xf64, {attr}>
- sparse_tensor.out %a, %p : tensor<10x10xf64, {attr}>, !llvm.ptr<i8>
+ sparse_tensor.out %a, %p : tensor<10x10xf64, {attr}>, !llvm.ptr
return
}}
"""
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
index 78f1bede5a6a529..5a9fccdc31640c0 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/ArmSME/load-store-128-bit-tile.mlir
@@ -37,10 +37,10 @@ func.func @vector_copy_i128(%src: memref<?x?xi128>, %dst: memref<?x?xi128>) {
}
func.func @test_load_store_zaq0() {
- %init_a_str = llvm.mlir.addressof @init_tile_a : !llvm.ptr<array<17 x i8>>
- %init_b_str = llvm.mlir.addressof @init_tile_b : !llvm.ptr<array<17 x i8>>
- %final_a_str = llvm.mlir.addressof @final_tile_a : !llvm.ptr<array<17 x i8>>
- %final_b_str = llvm.mlir.addressof @final_tile_b : !llvm.ptr<array<17 x i8>>
+ %init_a_str = llvm.mlir.addressof @init_tile_a : !llvm.ptr
+ %init_b_str = llvm.mlir.addressof @init_tile_b : !llvm.ptr
+ %final_a_str = llvm.mlir.addressof @final_tile_a : !llvm.ptr
+ %final_b_str = llvm.mlir.addressof @final_tile_b : !llvm.ptr
%c0 = arith.constant 0 : index
%min_elts_q = arith.constant 1 : index
diff --git a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir
index 4b57a2924883736..828e498543a9f20 100644
--- a/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir
+++ b/mlir/test/Integration/Dialect/Vector/CPU/X86Vector/test-inline-asm-vector-avx512.mlir
@@ -13,18 +13,17 @@ module {
llvm.func @entry() -> i32 {
%c0 = llvm.mlir.constant(0 : index) : i64
- %1 = llvm.mlir.addressof @const16 : !llvm.ptr<array<16 x i32>>
+ %1 = llvm.mlir.addressof @const16 : !llvm.ptr
%ptr = llvm.getelementptr %1[%c0, %c0]
- : (!llvm.ptr<array<16 x i32>>, i64, i64) -> !llvm.ptr<i32>
- %ptr2 = llvm.bitcast %ptr : !llvm.ptr<i32> to !llvm.ptr<vector<16xi32>>
+ : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<16 x i32>
// operand_attrs of *m operands need to be piped through to LLVM for
// verification to pass.
%v = llvm.inline_asm
asm_dialect = intel
operand_attrs = [{ elementtype = vector<16xi32> }]
- "vmovdqu32 $0, $1", "=x,*m" %ptr2
- : (!llvm.ptr<vector<16xi32>>) -> vector<16xi32>
+ "vmovdqu32 $0, $1", "=x,*m" %ptr
+ : (!llvm.ptr) -> vector<16xi32>
// CHECK: 0
%v0 = vector.extract %v[0]: i32 from vector<16xi32>
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir
index 2ad39405cc06f4b..19f88306050afb8 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_128x64_swizzle128b.mlir
@@ -31,7 +31,7 @@
module @mymod {
func.func private @printMemrefF32(memref<*xf32>)
memref.global "private" @bufferLhsGlobal : !shmemlhs
- llvm.func @printf(!llvm.ptr<i8>, ...) -> i32
+ llvm.func @printf(!llvm.ptr, ...) -> i32
func.func @main() {
%c8192 = arith.constant 8192 : index
%c-1_i32 = arith.constant -1 : i32
diff --git a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir
index 13b9c48dabe85d7..4ce8db0f2cba212 100644
--- a/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir
+++ b/mlir/test/Integration/GPU/CUDA/sm90/tma_load_64x64_swizzle128b.mlir
@@ -41,7 +41,7 @@ module @mymod {
func.func private @printMemrefF32(memref<*xf32>)
memref.global "private" @bufferLhsGlobal : !shmemlhs
memref.global "private" @bufferRhsGlobal : !shmemrhs
- llvm.func @printf(!llvm.ptr<i8>, ...) -> i32
+ llvm.func @printf(!llvm.ptr, ...) -> i32
func.func @main() {
%c32768 = arith.constant 32768 : index
%c-1_i32 = arith.constant -1 : i32
``````````
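Beyond the one-line alias updates, the X86Vector inline-asm hunk above shows the other half of the migration: ops that used to infer an element type from the typed pointer now spell it out, and pointer-to-pointer bitcasts become unnecessary. A condensed sketch of that hunk (the identifiers `@const16` and `%c0` come from the test itself):

```mlir
// With opaque pointers, llvm.getelementptr carries an explicit
// element type (!llvm.array<16 x i32>), and the old llvm.bitcast
// from !llvm.ptr<i32> to !llvm.ptr<vector<16xi32>> is no longer needed.
%base = llvm.mlir.addressof @const16 : !llvm.ptr
%ptr = llvm.getelementptr %base[%c0, %c0]
    : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<16 x i32>
```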
https://github.com/llvm/llvm-project/pull/71208