[Mlir-commits] [mlir] Reland "[MLIR][FuncToLLVM] Remove typed pointer support" (PR #70717)

Christian Ulmann llvmlistbot at llvm.org
Mon Oct 30 14:47:40 PDT 2023


https://github.com/Dinistro updated https://github.com/llvm/llvm-project/pull/70717

From c8bd32bec28b1fb41e5472648d863f1ded9ae262 Mon Sep 17 00:00:00 2001
From: Christian Ulmann <christianulmann at gmail.com>
Date: Sat, 28 Oct 2023 23:17:58 +0200
Subject: [PATCH 1/2] [MLIR][FuncToLLVM] Remove typed pointer support

This commit removes support for lowering the Func dialect to the LLVM
dialect with typed pointers. Typed pointers have been deprecated for a
while now and are planned to be removed from the LLVM dialect soon.
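
The pass therefore no longer accepts the 'use-opaque-pointers' option and
always produces opaque '!llvm.ptr' values. A minimal sketch of the updated
invocation, taken directly from the tests touched by this patch (see
convert-funcs.mlir below):

  // RUN: mlir-opt -convert-func-to-llvm %s | FileCheck %s

  // CHECK: llvm.func @second_order_arg(!llvm.ptr)
  func.func private @second_order_arg(%arg0 : () -> ())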
---
 mlir/include/mlir/Conversion/Passes.td        |   3 -
 mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp |   1 -
 .../FuncToLLVM/calling-convention.mlir        |   4 +-
 .../FuncToLLVM/convert-argattrs.mlir          |   2 +-
 .../FuncToLLVM/convert-data-layout.mlir       |   2 +-
 .../Conversion/FuncToLLVM/convert-funcs.mlir  |   2 +-
 .../emit-c-wrappers-for-external-callers.mlir |   2 +-
 ...mit-c-wrappers-for-external-functions.mlir |   2 +-
 .../FuncToLLVM/func-memref-return.mlir        |   4 +-
 .../Conversion/FuncToLLVM/func-memref.mlir    |   4 +-
 .../Conversion/FuncToLLVM/func-to-llvm.mlir   |   4 +-
 mlir/test/Conversion/FuncToLLVM/invalid.mlir  |   2 +-
 .../Conversion/FuncToLLVM/typed-pointers.mlir | 114 ------------------
 .../mlir-vulkan-runner/mlir-vulkan-runner.cpp |   3 +-
 14 files changed, 15 insertions(+), 134 deletions(-)
 delete mode 100644 mlir/test/Conversion/FuncToLLVM/typed-pointers.mlir

diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index cf6e545749ffc64..a2307bc243f6156 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -409,9 +409,6 @@ def ConvertFuncToLLVMPass : Pass<"convert-func-to-llvm", "ModuleOp"> {
     Option<"indexBitwidth", "index-bitwidth", "unsigned",
            /*default=kDeriveIndexBitwidthFromDataLayout*/"0",
            "Bitwidth of the index type, 0 to use size of machine word">,
-    Option<"useOpaquePointers", "use-opaque-pointers", "bool",
-                       /*default=*/"true", "Generate LLVM IR using opaque pointers "
-                       "instead of typed pointers">,
   ];
 }
 
diff --git a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
index 3506f50916132dd..3126d1dee32cbc5 100644
--- a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
+++ b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
@@ -790,7 +790,6 @@ struct ConvertFuncToLLVMPass
     if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
       options.overrideIndexBitwidth(indexBitwidth);
     options.dataLayout = llvm::DataLayout(dataLayout);
-    options.useOpaquePointers = useOpaquePointers;
 
     LLVMTypeConverter typeConverter(&getContext(), options,
                                     &dataLayoutAnalysis);
diff --git a/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir b/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
index 1ed67708875604d..7cdb89e1f72d287 100644
--- a/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt -finalize-memref-to-llvm='use-opaque-pointers=1' -llvm-request-c-wrappers -convert-func-to-llvm='use-opaque-pointers=1' -reconcile-unrealized-casts %s | FileCheck %s
-// RUN: mlir-opt -finalize-memref-to-llvm='use-opaque-pointers=1' -convert-func-to-llvm='use-opaque-pointers=1' -reconcile-unrealized-casts %s | FileCheck %s --check-prefix=EMIT_C_ATTRIBUTE
+// RUN: mlir-opt -finalize-memref-to-llvm -llvm-request-c-wrappers -convert-func-to-llvm -reconcile-unrealized-casts %s | FileCheck %s
+// RUN: mlir-opt -finalize-memref-to-llvm -convert-func-to-llvm -reconcile-unrealized-casts %s | FileCheck %s --check-prefix=EMIT_C_ATTRIBUTE
 
 // This tests the default memref calling convention and the emission of C
 // wrappers. We don't need to separate runs because the wrapper-emission
diff --git a/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir b/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir
index 41aff17d86919f6..85c7cbddfdbf634 100644
--- a/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-argattrs.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-func-to-llvm='use-opaque-pointers=1' %s | FileCheck %s
+// RUN: mlir-opt -convert-func-to-llvm %s | FileCheck %s
 
 // CHECK-LABEL: func @check_attributes
 // CHECK-SAME: {dialect.a = true, dialect.b = 4 : i64}
diff --git a/mlir/test/Conversion/FuncToLLVM/convert-data-layout.mlir b/mlir/test/Conversion/FuncToLLVM/convert-data-layout.mlir
index fb33d4fdfbe7c9d..0e7c16ec507998c 100644
--- a/mlir/test/Conversion/FuncToLLVM/convert-data-layout.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-data-layout.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -set-llvm-module-datalayout -convert-func-to-llvm='use-opaque-pointers=1' %s | FileCheck %s
+// RUN: mlir-opt -set-llvm-module-datalayout -convert-func-to-llvm %s | FileCheck %s
 
 // RUN-32: mlir-opt -set-llvm-module-datalayout='data-layout=p:32:32:32' -convert-func-to-llvm='use-opaque-pointers=1' %s \
 // RUN-32: | FileCheck %s
diff --git a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
index 9fe5ad5cdda65ff..765d8469f3c5618 100644
--- a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -convert-func-to-llvm='use-opaque-pointers=1' -split-input-file -verify-diagnostics %s | FileCheck %s
+// RUN: mlir-opt -convert-func-to-llvm -split-input-file -verify-diagnostics %s | FileCheck %s
 
 //CHECK: llvm.func @second_order_arg(!llvm.ptr)
 func.func private @second_order_arg(%arg0 : () -> ())
diff --git a/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-callers.mlir b/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-callers.mlir
index dd474e140110575..826ca9540ae5655 100644
--- a/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-callers.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-callers.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -llvm-request-c-wrappers -convert-func-to-llvm='use-opaque-pointers=1' %s | FileCheck %s
+// RUN: mlir-opt -llvm-request-c-wrappers -convert-func-to-llvm %s | FileCheck %s
 
 // CHECK: llvm.func @res_attrs_with_memref_return() -> (!llvm.struct{{.*}} {test.returnOne})
 // CHECK-LABEL: llvm.func @_mlir_ciface_res_attrs_with_memref_return
diff --git a/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-functions.mlir b/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-functions.mlir
index 027d29b0bf079a4..28c2638c7be519c 100644
--- a/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-functions.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/emit-c-wrappers-for-external-functions.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt -llvm-request-c-wrappers -convert-func-to-llvm='use-opaque-pointers=1' %s | FileCheck %s
+// RUN: mlir-opt -llvm-request-c-wrappers -convert-func-to-llvm %s | FileCheck %s
 
 // CHECK: llvm.func private @res_attrs_with_memref_return() -> (!llvm.struct{{.*}} {test.returnOne})
 // CHECK-LABEL: llvm.func @_mlir_ciface_res_attrs_with_memref_return
diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
index b584d4ce28f52ab..91ef571cb3bf71c 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-memref-return.mlir
@@ -1,6 +1,6 @@
-// RUN: mlir-opt -convert-func-to-llvm='use-opaque-pointers=1' -reconcile-unrealized-casts %s | FileCheck %s
+// RUN: mlir-opt -convert-func-to-llvm -reconcile-unrealized-casts %s | FileCheck %s
 
-// RUN: mlir-opt -convert-func-to-llvm='use-bare-ptr-memref-call-conv=1 use-opaque-pointers=1'  %s | FileCheck %s --check-prefix=BAREPTR
+// RUN: mlir-opt -convert-func-to-llvm='use-bare-ptr-memref-call-conv=1'  %s | FileCheck %s --check-prefix=BAREPTR
 
 // RUN: mlir-opt -transform-interpreter %s | FileCheck %s --check-prefix=BAREPTR
 
diff --git a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir
index b61287643dca948..d44a07bdcc9ab06 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-memref.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-memref.mlir
@@ -1,5 +1,5 @@
-// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm{use-opaque-pointers=1},reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s
-// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1 use-opaque-pointers=1},reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR
+// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-arith-to-llvm),convert-func-to-llvm{use-bare-ptr-memref-call-conv=1},reconcile-unrealized-casts)" -split-input-file %s | FileCheck %s --check-prefix=BAREPTR
 
 // BAREPTR-LABEL: func @check_noalias
 // BAREPTR-SAME: %{{.*}}: !llvm.ptr {llvm.noalias}, %{{.*}}: !llvm.ptr {llvm.noalias}
diff --git a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
index 8254e77c8628bdf..9cc6bbf0873abdb 100644
--- a/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/func-to-llvm.mlir
@@ -1,6 +1,6 @@
-// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm{use-opaque-pointers=1},reconcile-unrealized-casts)" %s | FileCheck %s
+// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm),convert-func-to-llvm,reconcile-unrealized-casts)" %s | FileCheck %s
 
-// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32 use-opaque-pointers=1},reconcile-unrealized-casts)" %s | FileCheck --check-prefix=CHECK32 %s
+// RUN: mlir-opt -pass-pipeline="builtin.module(func.func(convert-math-to-llvm,convert-arith-to-llvm{index-bitwidth=32}),convert-func-to-llvm{index-bitwidth=32},reconcile-unrealized-casts)" %s | FileCheck --check-prefix=CHECK32 %s
 
 // RUN: mlir-opt -transform-interpreter %s | FileCheck --check-prefix=CHECK32 %s
 
diff --git a/mlir/test/Conversion/FuncToLLVM/invalid.mlir b/mlir/test/Conversion/FuncToLLVM/invalid.mlir
index 798d0a8519efeba..e70252ff87ed133 100644
--- a/mlir/test/Conversion/FuncToLLVM/invalid.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/invalid.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-func-to-llvm='use-opaque-pointers=1' -verify-diagnostics -split-input-file
+// RUN: mlir-opt %s -convert-func-to-llvm -verify-diagnostics -split-input-file
 
 // Should not crash on unsupported types in function signatures.
 func.func private @unsupported_signature() -> tensor<10 x i32>
diff --git a/mlir/test/Conversion/FuncToLLVM/typed-pointers.mlir b/mlir/test/Conversion/FuncToLLVM/typed-pointers.mlir
deleted file mode 100644
index 7b3b816cc38bb12..000000000000000
--- a/mlir/test/Conversion/FuncToLLVM/typed-pointers.mlir
+++ /dev/null
@@ -1,114 +0,0 @@
-// RUN: mlir-opt -convert-func-to-llvm='use-opaque-pointers=0' -split-input-file %s | FileCheck %s
-
-//CHECK: llvm.func @second_order_arg(!llvm.ptr<func<void ()>>)
-func.func private @second_order_arg(%arg0 : () -> ())
-
-//CHECK: llvm.func @second_order_result() -> !llvm.ptr<func<void ()>>
-func.func private @second_order_result() -> (() -> ())
-
-//CHECK: llvm.func @second_order_multi_result() -> !llvm.struct<(ptr<func<i32 ()>>, ptr<func<i64 ()>>, ptr<func<f32 ()>>)>
-func.func private @second_order_multi_result() -> (() -> (i32), () -> (i64), () -> (f32))
-
-//CHECK: llvm.func @third_order(!llvm.ptr<func<ptr<func<void ()>> (ptr<func<void ()>>)>>) -> !llvm.ptr<func<ptr<func<void ()>> (ptr<func<void ()>>)>>
-func.func private @third_order(%arg0 : (() -> ()) -> (() -> ())) -> ((() -> ()) -> (() -> ()))
-
-//CHECK: llvm.func @fifth_order_left(!llvm.ptr<func<void (ptr<func<void (ptr<func<void (ptr<func<void ()>>)>>)>>)>>)
-func.func private @fifth_order_left(%arg0: (((() -> ()) -> ()) -> ()) -> ())
-
-//CHECK: llvm.func @fifth_order_right(!llvm.ptr<func<ptr<func<ptr<func<ptr<func<void ()>> ()>> ()>> ()>>)
-func.func private @fifth_order_right(%arg0: () -> (() -> (() -> (() -> ()))))
-
-// Check that memrefs are converted to argument packs if appear as function arguments.
-// CHECK: llvm.func @memref_call_conv(!llvm.ptr<f32>, !llvm.ptr<f32>, i64, i64, i64)
-func.func private @memref_call_conv(%arg0: memref<?xf32>)
-
-// Same in nested functions.
-// CHECK: llvm.func @memref_call_conv_nested(!llvm.ptr<func<void (ptr<f32>, ptr<f32>, i64, i64, i64)>>)
-func.func private @memref_call_conv_nested(%arg0: (memref<?xf32>) -> ())
-
-//CHECK-LABEL: llvm.func @pass_through(%arg0: !llvm.ptr<func<void ()>>) -> !llvm.ptr<func<void ()>> {
-func.func @pass_through(%arg0: () -> ()) -> (() -> ()) {
-// CHECK-NEXT:  llvm.br ^bb1(%arg0 : !llvm.ptr<func<void ()>>)
-  cf.br ^bb1(%arg0 : () -> ())
-
-//CHECK-NEXT: ^bb1(%0: !llvm.ptr<func<void ()>>):
-^bb1(%bbarg: () -> ()):
-// CHECK-NEXT:  llvm.return %0 : !llvm.ptr<func<void ()>>
-  return %bbarg : () -> ()
-}
-
-// CHECK-LABEL: llvm.func @indirect_call(%arg0: !llvm.ptr<func<i32 (f32)>>, %arg1: f32) -> i32 {
-func.func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
-// CHECK-NEXT:  %0 = llvm.call %arg0(%arg1) : !llvm.ptr<func<i32 (f32)>>, (f32) -> i32
-  %0 = call_indirect %arg0(%arg1) : (f32) -> i32
-// CHECK-NEXT:  llvm.return %0 : i32
-  return %0 : i32
-}
-
-// CHECK-LABEL: llvm.func @get_i64() -> i64
-func.func private @get_i64() -> (i64)
-// CHECK-LABEL: llvm.func @get_f32() -> f32
-func.func private @get_f32() -> (f32)
-// CHECK-LABEL: llvm.func @get_memref() -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>
-func.func private @get_memref() -> (memref<42x?x10x?xf32>)
-
-// CHECK-LABEL: llvm.func @multireturn() -> !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)> {
-func.func @multireturn() -> (i64, f32, memref<42x?x10x?xf32>) {
-^bb0:
-// CHECK-NEXT:  {{.*}} = llvm.call @get_i64() : () -> i64
-// CHECK-NEXT:  {{.*}} = llvm.call @get_f32() : () -> f32
-// CHECK-NEXT:  {{.*}} = llvm.call @get_memref() : () -> !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>
-  %0 = call @get_i64() : () -> (i64)
-  %1 = call @get_f32() : () -> (f32)
-  %2 = call @get_memref() : () -> (memref<42x?x10x?xf32>)
-// CHECK-NEXT:  {{.*}} = llvm.mlir.undef : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[0] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[1] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  {{.*}} = llvm.insertvalue {{.*}}, {{.*}}[2] : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
-// CHECK-NEXT:  llvm.return {{.*}} : !llvm.struct<(i64, f32, struct<(ptr<f32>, ptr<f32>, i64, array<4 x i64>, array<4 x i64>)>)>
-  return %0, %1, %2 : i64, f32, memref<42x?x10x?xf32>
-}
-
-//===========================================================================//
-// Calling convention on returning unranked memrefs.
-// IR below produced by running -finalize-memref-to-llvm without opaque
-// pointers on calling-convention.mlir
-//===========================================================================//
-
-func.func @return_var_memref(%arg0: memref<4x3xf32>) -> memref<*xf32> attributes {llvm.emit_c_interface} {
-  %0 = builtin.unrealized_conversion_cast %arg0 : memref<4x3xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
-  %1 = llvm.mlir.constant(1 : index) : i64
-  %2 = llvm.alloca %1 x !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>
-  llvm.store %0, %2 : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>
-  %3 = llvm.bitcast %2 : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>> to !llvm.ptr<i8>
-  %4 = llvm.mlir.constant(2 : index) : i64
-  %5 = llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
-  %6 = llvm.insertvalue %4, %5[0] : !llvm.struct<(i64, ptr<i8>)>
-  %7 = llvm.insertvalue %3, %6[1] : !llvm.struct<(i64, ptr<i8>)>
-  %8 = builtin.unrealized_conversion_cast %7 : !llvm.struct<(i64, ptr<i8>)> to memref<*xf32>
-  return %8 : memref<*xf32>
-}
-
-// Check that the result memref is passed as parameter
-// CHECK-LABEL: @_mlir_ciface_return_var_memref
-// CHECK-SAME: (%{{.*}}: !llvm.ptr<struct<(i64, ptr<i8>)>>, %{{.*}}: !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>)
-
-func.func @return_two_var_memref(%arg0: memref<4x3xf32>) -> (memref<*xf32>, memref<*xf32>) attributes {llvm.emit_c_interface} {
-  %0 = builtin.unrealized_conversion_cast %arg0 : memref<4x3xf32> to !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>
-  %1 = llvm.mlir.constant(1 : index) : i64
-  %2 = llvm.alloca %1 x !llvm.struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)> : (i64) -> !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>
-  llvm.store %0, %2 : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>
-  %3 = llvm.bitcast %2 : !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>> to !llvm.ptr<i8>
-  %4 = llvm.mlir.constant(2 : index) : i64
-  %5 = llvm.mlir.undef : !llvm.struct<(i64, ptr<i8>)>
-  %6 = llvm.insertvalue %4, %5[0] : !llvm.struct<(i64, ptr<i8>)>
-  %7 = llvm.insertvalue %3, %6[1] : !llvm.struct<(i64, ptr<i8>)>
-  %8 = builtin.unrealized_conversion_cast %7 : !llvm.struct<(i64, ptr<i8>)> to memref<*xf32>
-  return %8, %8 : memref<*xf32>, memref<*xf32>
-}
-
-// Check that the result memrefs are passed as parameter
-// CHECK-LABEL: @_mlir_ciface_return_two_var_memref
-// CHECK-SAME: (%{{.*}}: !llvm.ptr<struct<(struct<(i64, ptr<i8>)>, struct<(i64, ptr<i8>)>)>>,
-// CHECK-SAME: %{{.*}}: !llvm.ptr<struct<(ptr<f32>, ptr<f32>, i64, array<2 x i64>, array<2 x i64>)>>)
-
diff --git a/mlir/tools/mlir-vulkan-runner/mlir-vulkan-runner.cpp b/mlir/tools/mlir-vulkan-runner/mlir-vulkan-runner.cpp
index d3ec890bf485907..5b8e236b4618f51 100644
--- a/mlir/tools/mlir-vulkan-runner/mlir-vulkan-runner.cpp
+++ b/mlir/tools/mlir-vulkan-runner/mlir-vulkan-runner.cpp
@@ -77,8 +77,7 @@ static LogicalResult runMLIRPasses(Operation *op,
   ConvertFuncToLLVMPassOptions funcToLLVMOptions{};
   funcToLLVMOptions.indexBitwidth =
       DataLayout(module).getTypeSizeInBits(IndexType::get(module.getContext()));
-  passManager.addPass(
-      createConvertFuncToLLVMPass(funcToLLVMOptions));
+  passManager.addPass(createConvertFuncToLLVMPass(funcToLLVMOptions));
   passManager.addPass(createReconcileUnrealizedCastsPass());
   passManager.addPass(createConvertVulkanLaunchFuncToVulkanCallsPass());
 

From 6f50fe0990afcef007ab707cf69a62c50a400249 Mon Sep 17 00:00:00 2001
From: Christian Ulmann <christian.ulmann at nextsilicon.com>
Date: Mon, 30 Oct 2023 21:46:25 +0000
Subject: [PATCH 2/2] [MLIR][AsyncToLLVM] Remove typed pointer support

This commit removes support for lowering the Async dialect to the LLVM
dialect with typed pointers. Typed pointers have been deprecated for a
while now and are planned to be removed from the LLVM dialect soon.

Related PSA: https://discourse.llvm.org/t/psa-removal-of-typed-pointers-from-the-llvm-dialect/74502
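
The 'use-opaque-pointers' option is dropped from this pass as well, so the
runtime lowering uses opaque pointers unconditionally. A minimal sketch,
assuming default pass options; the authoritative CHECK lines are in the
updated convert-runtime-to-llvm.mlir test:

  // RUN: mlir-opt %s -convert-async-to-llvm | FileCheck %s

  // With typed pointers gone, the storage pointer returned by
  // @mlirAsyncRuntimeGetValueStorage is used directly as !llvm.ptr,
  // without the llvm.bitcast the removed typed-pointer test checked for.
  func.func @store() {
    %0 = arith.constant 1.0 : f32
    %1 = async.runtime.create : !async.value<f32>
    async.runtime.store %0, %1 : !async.value<f32>
    return
  }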
---
 mlir/include/mlir/Conversion/Passes.td        |   5 -
 .../mlir/Dialect/LLVMIR/FunctionCallUtils.h   |   5 +-
 .../Conversion/AsyncToLLVM/AsyncToLLVM.cpp    | 239 ++++++------------
 .../AsyncToLLVM/convert-coro-to-llvm.mlir     |   2 +-
 .../AsyncToLLVM/convert-runtime-to-llvm.mlir  |   2 +-
 .../AsyncToLLVM/convert-to-llvm.mlir          |   2 +-
 .../AsyncToLLVM/typed-pointers.mlir           | 138 ----------
 7 files changed, 81 insertions(+), 312 deletions(-)
 delete mode 100644 mlir/test/Conversion/AsyncToLLVM/typed-pointers.mlir

diff --git a/mlir/include/mlir/Conversion/Passes.td b/mlir/include/mlir/Conversion/Passes.td
index a2307bc243f6156..5423be0e91d0ac4 100644
--- a/mlir/include/mlir/Conversion/Passes.td
+++ b/mlir/include/mlir/Conversion/Passes.td
@@ -191,11 +191,6 @@ def ConvertAsyncToLLVMPass : Pass<"convert-async-to-llvm", "ModuleOp"> {
     "LLVM::LLVMDialect",
     "func::FuncDialect",
   ];
-  let options = [
-    Option<"useOpaquePointers", "use-opaque-pointers", "bool",
-           /*default=*/"true", "Generate LLVM IR using opaque pointers "
-           "instead of typed pointers">,
-  ];
 }
 
 //===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/LLVMIR/FunctionCallUtils.h b/mlir/include/mlir/Dialect/LLVMIR/FunctionCallUtils.h
index 9e69717f471bce2..05320c0c7186907 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/FunctionCallUtils.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/FunctionCallUtils.h
@@ -52,8 +52,9 @@ LLVM::LLVMFuncOp lookupOrCreatePrintNewlineFn(ModuleOp moduleOp);
 LLVM::LLVMFuncOp lookupOrCreateMallocFn(ModuleOp moduleOp, Type indexType,
                                         bool opaquePointers);
 LLVM::LLVMFuncOp lookupOrCreateAlignedAllocFn(ModuleOp moduleOp, Type indexType,
-                                              bool opaquePointers);
-LLVM::LLVMFuncOp lookupOrCreateFreeFn(ModuleOp moduleOp, bool opaquePointers);
+                                              bool opaquePointers = true);
+LLVM::LLVMFuncOp lookupOrCreateFreeFn(ModuleOp moduleOp,
+                                      bool opaquePointers = true);
 LLVM::LLVMFuncOp lookupOrCreateGenericAllocFn(ModuleOp moduleOp, Type indexType,
                                               bool opaquePointers);
 LLVM::LLVMFuncOp lookupOrCreateGenericAlignedAllocFn(ModuleOp moduleOp,
diff --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index d9ea60a6749d926..3e61c9c7de50e2f 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -76,20 +76,16 @@ namespace {
 /// lowering all async data types become opaque pointers at runtime.
 struct AsyncAPI {
   // All async types are lowered to opaque LLVM pointers at runtime.
-  static LLVM::LLVMPointerType opaquePointerType(MLIRContext *ctx,
-                                                 bool useLLVMOpaquePointers) {
-    if (useLLVMOpaquePointers)
-      return LLVM::LLVMPointerType::get(ctx);
-    return LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
+  static LLVM::LLVMPointerType opaquePointerType(MLIRContext *ctx) {
+    return LLVM::LLVMPointerType::get(ctx);
   }
 
   static LLVM::LLVMTokenType tokenType(MLIRContext *ctx) {
     return LLVM::LLVMTokenType::get(ctx);
   }
 
-  static FunctionType addOrDropRefFunctionType(MLIRContext *ctx,
-                                               bool useLLVMOpaquePointers) {
-    auto ref = opaquePointerType(ctx, useLLVMOpaquePointers);
+  static FunctionType addOrDropRefFunctionType(MLIRContext *ctx) {
+    auto ref = opaquePointerType(ctx);
     auto count = IntegerType::get(ctx, 64);
     return FunctionType::get(ctx, {ref, count}, {});
   }
@@ -98,10 +94,9 @@ struct AsyncAPI {
     return FunctionType::get(ctx, {}, {TokenType::get(ctx)});
   }
 
-  static FunctionType createValueFunctionType(MLIRContext *ctx,
-                                              bool useLLVMOpaquePointers) {
+  static FunctionType createValueFunctionType(MLIRContext *ctx) {
     auto i64 = IntegerType::get(ctx, 64);
-    auto value = opaquePointerType(ctx, useLLVMOpaquePointers);
+    auto value = opaquePointerType(ctx);
     return FunctionType::get(ctx, {i64}, {value});
   }
 
@@ -110,10 +105,9 @@ struct AsyncAPI {
     return FunctionType::get(ctx, {i64}, {GroupType::get(ctx)});
   }
 
-  static FunctionType getValueStorageFunctionType(MLIRContext *ctx,
-                                                  bool useLLVMOpaquePointers) {
-    auto value = opaquePointerType(ctx, useLLVMOpaquePointers);
-    auto storage = opaquePointerType(ctx, useLLVMOpaquePointers);
+  static FunctionType getValueStorageFunctionType(MLIRContext *ctx) {
+    auto value = opaquePointerType(ctx);
+    auto storage = opaquePointerType(ctx);
     return FunctionType::get(ctx, {value}, {storage});
   }
 
@@ -121,9 +115,8 @@ struct AsyncAPI {
     return FunctionType::get(ctx, {TokenType::get(ctx)}, {});
   }
 
-  static FunctionType emplaceValueFunctionType(MLIRContext *ctx,
-                                               bool useLLVMOpaquePointers) {
-    auto value = opaquePointerType(ctx, useLLVMOpaquePointers);
+  static FunctionType emplaceValueFunctionType(MLIRContext *ctx) {
+    auto value = opaquePointerType(ctx);
     return FunctionType::get(ctx, {value}, {});
   }
 
@@ -131,9 +124,8 @@ struct AsyncAPI {
     return FunctionType::get(ctx, {TokenType::get(ctx)}, {});
   }
 
-  static FunctionType setValueErrorFunctionType(MLIRContext *ctx,
-                                                bool useLLVMOpaquePointers) {
-    auto value = opaquePointerType(ctx, useLLVMOpaquePointers);
+  static FunctionType setValueErrorFunctionType(MLIRContext *ctx) {
+    auto value = opaquePointerType(ctx);
     return FunctionType::get(ctx, {value}, {});
   }
 
@@ -142,9 +134,8 @@ struct AsyncAPI {
     return FunctionType::get(ctx, {TokenType::get(ctx)}, {i1});
   }
 
-  static FunctionType isValueErrorFunctionType(MLIRContext *ctx,
-                                               bool useLLVMOpaquePointers) {
-    auto value = opaquePointerType(ctx, useLLVMOpaquePointers);
+  static FunctionType isValueErrorFunctionType(MLIRContext *ctx) {
+    auto value = opaquePointerType(ctx);
     auto i1 = IntegerType::get(ctx, 1);
     return FunctionType::get(ctx, {value}, {i1});
   }
@@ -158,9 +149,8 @@ struct AsyncAPI {
     return FunctionType::get(ctx, {TokenType::get(ctx)}, {});
   }
 
-  static FunctionType awaitValueFunctionType(MLIRContext *ctx,
-                                             bool useLLVMOpaquePointers) {
-    auto value = opaquePointerType(ctx, useLLVMOpaquePointers);
+  static FunctionType awaitValueFunctionType(MLIRContext *ctx) {
+    auto value = opaquePointerType(ctx);
     return FunctionType::get(ctx, {value}, {});
   }
 
@@ -168,15 +158,9 @@ struct AsyncAPI {
     return FunctionType::get(ctx, {GroupType::get(ctx)}, {});
   }
 
-  static FunctionType executeFunctionType(MLIRContext *ctx,
-                                          bool useLLVMOpaquePointers) {
-    auto hdl = opaquePointerType(ctx, useLLVMOpaquePointers);
-    Type resume;
-    if (useLLVMOpaquePointers)
-      resume = LLVM::LLVMPointerType::get(ctx);
-    else
-      resume = LLVM::LLVMPointerType::get(
-          resumeFunctionType(ctx, useLLVMOpaquePointers));
+  static FunctionType executeFunctionType(MLIRContext *ctx) {
+    auto hdl = opaquePointerType(ctx);
+    Type resume = AsyncAPI::opaquePointerType(ctx);
     return FunctionType::get(ctx, {hdl, resume}, {});
   }
 
@@ -186,42 +170,22 @@ struct AsyncAPI {
                              {i64});
   }
 
-  static FunctionType
-  awaitTokenAndExecuteFunctionType(MLIRContext *ctx,
-                                   bool useLLVMOpaquePointers) {
-    auto hdl = opaquePointerType(ctx, useLLVMOpaquePointers);
-    Type resume;
-    if (useLLVMOpaquePointers)
-      resume = LLVM::LLVMPointerType::get(ctx);
-    else
-      resume = LLVM::LLVMPointerType::get(
-          resumeFunctionType(ctx, useLLVMOpaquePointers));
+  static FunctionType awaitTokenAndExecuteFunctionType(MLIRContext *ctx) {
+    auto hdl = opaquePointerType(ctx);
+    Type resume = AsyncAPI::opaquePointerType(ctx);
     return FunctionType::get(ctx, {TokenType::get(ctx), hdl, resume}, {});
   }
 
-  static FunctionType
-  awaitValueAndExecuteFunctionType(MLIRContext *ctx,
-                                   bool useLLVMOpaquePointers) {
-    auto value = opaquePointerType(ctx, useLLVMOpaquePointers);
-    auto hdl = opaquePointerType(ctx, useLLVMOpaquePointers);
-    Type resume;
-    if (useLLVMOpaquePointers)
-      resume = LLVM::LLVMPointerType::get(ctx);
-    else
-      resume = LLVM::LLVMPointerType::get(
-          resumeFunctionType(ctx, useLLVMOpaquePointers));
+  static FunctionType awaitValueAndExecuteFunctionType(MLIRContext *ctx) {
+    auto value = opaquePointerType(ctx);
+    auto hdl = opaquePointerType(ctx);
+    Type resume = AsyncAPI::opaquePointerType(ctx);
     return FunctionType::get(ctx, {value, hdl, resume}, {});
   }
 
-  static FunctionType
-  awaitAllAndExecuteFunctionType(MLIRContext *ctx, bool useLLVMOpaquePointers) {
-    auto hdl = opaquePointerType(ctx, useLLVMOpaquePointers);
-    Type resume;
-    if (useLLVMOpaquePointers)
-      resume = LLVM::LLVMPointerType::get(ctx);
-    else
-      resume = LLVM::LLVMPointerType::get(
-          resumeFunctionType(ctx, useLLVMOpaquePointers));
+  static FunctionType awaitAllAndExecuteFunctionType(MLIRContext *ctx) {
+    auto hdl = opaquePointerType(ctx);
+    Type resume = AsyncAPI::opaquePointerType(ctx);
     return FunctionType::get(ctx, {GroupType::get(ctx), hdl, resume}, {});
   }
 
@@ -230,17 +194,16 @@ struct AsyncAPI {
   }
 
   // Auxiliary coroutine resume intrinsic wrapper.
-  static Type resumeFunctionType(MLIRContext *ctx, bool useLLVMOpaquePointers) {
+  static Type resumeFunctionType(MLIRContext *ctx) {
     auto voidTy = LLVM::LLVMVoidType::get(ctx);
-    auto ptrType = opaquePointerType(ctx, useLLVMOpaquePointers);
+    auto ptrType = opaquePointerType(ctx);
     return LLVM::LLVMFunctionType::get(voidTy, {ptrType}, false);
   }
 };
 } // namespace
 
 /// Adds Async Runtime C API declarations to the module.
-static void addAsyncRuntimeApiDeclarations(ModuleOp module,
-                                           bool useLLVMOpaquePointers) {
+static void addAsyncRuntimeApiDeclarations(ModuleOp module) {
   auto builder =
       ImplicitLocOpBuilder::atBlockEnd(module.getLoc(), module.getBody());
 
@@ -251,39 +214,30 @@ static void addAsyncRuntimeApiDeclarations(ModuleOp module,
   };
 
   MLIRContext *ctx = module.getContext();
-  addFuncDecl(kAddRef,
-              AsyncAPI::addOrDropRefFunctionType(ctx, useLLVMOpaquePointers));
-  addFuncDecl(kDropRef,
-              AsyncAPI::addOrDropRefFunctionType(ctx, useLLVMOpaquePointers));
+  addFuncDecl(kAddRef, AsyncAPI::addOrDropRefFunctionType(ctx));
+  addFuncDecl(kDropRef, AsyncAPI::addOrDropRefFunctionType(ctx));
   addFuncDecl(kCreateToken, AsyncAPI::createTokenFunctionType(ctx));
-  addFuncDecl(kCreateValue,
-              AsyncAPI::createValueFunctionType(ctx, useLLVMOpaquePointers));
+  addFuncDecl(kCreateValue, AsyncAPI::createValueFunctionType(ctx));
   addFuncDecl(kCreateGroup, AsyncAPI::createGroupFunctionType(ctx));
   addFuncDecl(kEmplaceToken, AsyncAPI::emplaceTokenFunctionType(ctx));
-  addFuncDecl(kEmplaceValue,
-              AsyncAPI::emplaceValueFunctionType(ctx, useLLVMOpaquePointers));
+  addFuncDecl(kEmplaceValue, AsyncAPI::emplaceValueFunctionType(ctx));
   addFuncDecl(kSetTokenError, AsyncAPI::setTokenErrorFunctionType(ctx));
-  addFuncDecl(kSetValueError,
-              AsyncAPI::setValueErrorFunctionType(ctx, useLLVMOpaquePointers));
+  addFuncDecl(kSetValueError, AsyncAPI::setValueErrorFunctionType(ctx));
   addFuncDecl(kIsTokenError, AsyncAPI::isTokenErrorFunctionType(ctx));
-  addFuncDecl(kIsValueError,
-              AsyncAPI::isValueErrorFunctionType(ctx, useLLVMOpaquePointers));
+  addFuncDecl(kIsValueError, AsyncAPI::isValueErrorFunctionType(ctx));
   addFuncDecl(kIsGroupError, AsyncAPI::isGroupErrorFunctionType(ctx));
   addFuncDecl(kAwaitToken, AsyncAPI::awaitTokenFunctionType(ctx));
-  addFuncDecl(kAwaitValue,
-              AsyncAPI::awaitValueFunctionType(ctx, useLLVMOpaquePointers));
+  addFuncDecl(kAwaitValue, AsyncAPI::awaitValueFunctionType(ctx));
   addFuncDecl(kAwaitGroup, AsyncAPI::awaitGroupFunctionType(ctx));
-  addFuncDecl(kExecute,
-              AsyncAPI::executeFunctionType(ctx, useLLVMOpaquePointers));
-  addFuncDecl(kGetValueStorage, AsyncAPI::getValueStorageFunctionType(
-                                    ctx, useLLVMOpaquePointers));
+  addFuncDecl(kExecute, AsyncAPI::executeFunctionType(ctx));
+  addFuncDecl(kGetValueStorage, AsyncAPI::getValueStorageFunctionType(ctx));
   addFuncDecl(kAddTokenToGroup, AsyncAPI::addTokenToGroupFunctionType(ctx));
-  addFuncDecl(kAwaitTokenAndExecute, AsyncAPI::awaitTokenAndExecuteFunctionType(
-                                         ctx, useLLVMOpaquePointers));
-  addFuncDecl(kAwaitValueAndExecute, AsyncAPI::awaitValueAndExecuteFunctionType(
-                                         ctx, useLLVMOpaquePointers));
-  addFuncDecl(kAwaitAllAndExecute, AsyncAPI::awaitAllAndExecuteFunctionType(
-                                       ctx, useLLVMOpaquePointers));
+  addFuncDecl(kAwaitTokenAndExecute,
+              AsyncAPI::awaitTokenAndExecuteFunctionType(ctx));
+  addFuncDecl(kAwaitValueAndExecute,
+              AsyncAPI::awaitValueAndExecuteFunctionType(ctx));
+  addFuncDecl(kAwaitAllAndExecute,
+              AsyncAPI::awaitAllAndExecuteFunctionType(ctx));
   addFuncDecl(kGetNumWorkerThreads, AsyncAPI::getNumWorkerThreads(ctx));
 }
 
@@ -296,7 +250,7 @@ static constexpr const char *kResume = "__resume";
 /// A function that takes a coroutine handle and calls a `llvm.coro.resume`
 /// intrinsics. We need this function to be able to pass it to the async
 /// runtime execute API.
-static void addResumeFunction(ModuleOp module, bool useOpaquePointers) {
+static void addResumeFunction(ModuleOp module) {
   if (module.lookupSymbol(kResume))
     return;
 
@@ -305,11 +259,7 @@ static void addResumeFunction(ModuleOp module, bool useOpaquePointers) {
   auto moduleBuilder = ImplicitLocOpBuilder::atBlockEnd(loc, module.getBody());
 
   auto voidTy = LLVM::LLVMVoidType::get(ctx);
-  Type ptrType;
-  if (useOpaquePointers)
-    ptrType = LLVM::LLVMPointerType::get(ctx);
-  else
-    ptrType = LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8));
+  Type ptrType = AsyncAPI::opaquePointerType(ctx);
 
   auto resumeOp = moduleBuilder.create<LLVM::LLVMFuncOp>(
       kResume, LLVM::LLVMFunctionType::get(voidTy, {ptrType}));
@@ -330,15 +280,10 @@ namespace {
 /// AsyncRuntimeTypeConverter only converts types from the Async dialect to
 /// their runtime type (opaque pointers) and does not convert any other types.
 class AsyncRuntimeTypeConverter : public TypeConverter {
-  bool llvmOpaquePointers = false;
-
 public:
-  AsyncRuntimeTypeConverter(const LowerToLLVMOptions &options)
-      : llvmOpaquePointers(options.useOpaquePointers) {
+  AsyncRuntimeTypeConverter(const LowerToLLVMOptions &options) {
     addConversion([](Type type) { return type; });
-    addConversion([this](Type type) {
-      return convertAsyncTypes(type, llvmOpaquePointers);
-    });
+    addConversion([](Type type) { return convertAsyncTypes(type); });
 
     // Use UnrealizedConversionCast as the bridge so that we don't need to pull
     // in patterns for other dialects.
@@ -352,28 +297,14 @@ class AsyncRuntimeTypeConverter : public TypeConverter {
     addTargetMaterialization(addUnrealizedCast);
   }
 
-  /// Returns whether LLVM opaque pointers should be used instead of typed
-  /// pointers.
-  bool useOpaquePointers() const { return llvmOpaquePointers; }
-
-  /// Creates an LLVM pointer type which may either be a typed pointer or an
-  /// opaque pointer, depending on what options the converter was constructed
-  /// with.
-  LLVM::LLVMPointerType getPointerType(Type elementType) const {
-    if (llvmOpaquePointers)
-      return LLVM::LLVMPointerType::get(elementType.getContext());
-    return LLVM::LLVMPointerType::get(elementType);
-  }
-
-  static std::optional<Type> convertAsyncTypes(Type type,
-                                               bool useOpaquePointers) {
+  static std::optional<Type> convertAsyncTypes(Type type) {
     if (isa<TokenType, GroupType, ValueType>(type))
-      return AsyncAPI::opaquePointerType(type.getContext(), useOpaquePointers);
+      return AsyncAPI::opaquePointerType(type.getContext());
 
     if (isa<CoroIdType, CoroStateType>(type))
       return AsyncAPI::tokenType(type.getContext());
     if (isa<CoroHandleType>(type))
-      return AsyncAPI::opaquePointerType(type.getContext(), useOpaquePointers);
+      return AsyncAPI::opaquePointerType(type.getContext());
 
     return std::nullopt;
   }
@@ -414,8 +345,7 @@ class CoroIdOpConversion : public AsyncOpConversionPattern<CoroIdOp> {
   matchAndRewrite(CoroIdOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     auto token = AsyncAPI::tokenType(op->getContext());
-    auto ptrType = AsyncAPI::opaquePointerType(
-        op->getContext(), getTypeConverter()->useOpaquePointers());
+    auto ptrType = AsyncAPI::opaquePointerType(op->getContext());
     auto loc = op->getLoc();
 
     // Constants for initializing coroutine frame.
@@ -444,8 +374,7 @@ class CoroBeginOpConversion : public AsyncOpConversionPattern<CoroBeginOp> {
   LogicalResult
   matchAndRewrite(CoroBeginOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto ptrType = AsyncAPI::opaquePointerType(
-        op->getContext(), getTypeConverter()->useOpaquePointers());
+    auto ptrType = AsyncAPI::opaquePointerType(op->getContext());
     auto loc = op->getLoc();
 
     // Get coroutine frame size: @llvm.coro.size.i64.
@@ -472,8 +401,7 @@ class CoroBeginOpConversion : public AsyncOpConversionPattern<CoroBeginOp> {
 
     // Allocate memory for the coroutine frame.
     auto allocFuncOp = LLVM::lookupOrCreateAlignedAllocFn(
-        op->getParentOfType<ModuleOp>(), rewriter.getI64Type(),
-        getTypeConverter()->useOpaquePointers());
+        op->getParentOfType<ModuleOp>(), rewriter.getI64Type());
     auto coroAlloc = rewriter.create<LLVM::CallOp>(
         loc, allocFuncOp, ValueRange{coroAlign, coroSize});
 
@@ -499,8 +427,7 @@ class CoroFreeOpConversion : public AsyncOpConversionPattern<CoroFreeOp> {
   LogicalResult
   matchAndRewrite(CoroFreeOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
-    auto ptrType = AsyncAPI::opaquePointerType(
-        op->getContext(), getTypeConverter()->useOpaquePointers());
+    auto ptrType = AsyncAPI::opaquePointerType(op->getContext());
     auto loc = op->getLoc();
 
     // Get a pointer to the coroutine frame memory: @llvm.coro.free.
@@ -509,8 +436,7 @@ class CoroFreeOpConversion : public AsyncOpConversionPattern<CoroFreeOp> {
 
     // Free the memory.
     auto freeFuncOp =
-        LLVM::lookupOrCreateFreeFn(op->getParentOfType<ModuleOp>(),
-                                   getTypeConverter()->useOpaquePointers());
+        LLVM::lookupOrCreateFreeFn(op->getParentOfType<ModuleOp>());
     rewriter.replaceOpWithNewOp<LLVM::CallOp>(op, freeFuncOp,
                                               ValueRange(coroMem.getResult()));
 
@@ -538,8 +464,9 @@ class CoroEndOpConversion : public OpConversionPattern<CoroEndOp> {
 
     // Mark the end of a coroutine: @llvm.coro.end.
     auto coroHdl = adaptor.getHandle();
-    rewriter.create<LLVM::CoroEndOp>(op->getLoc(), rewriter.getI1Type(),
-                                     ValueRange({coroHdl, constFalse, noneToken}));
+    rewriter.create<LLVM::CoroEndOp>(
+        op->getLoc(), rewriter.getI1Type(),
+        ValueRange({coroHdl, constFalse, noneToken}));
     rewriter.eraseOp(op);
 
     return success();
@@ -673,7 +600,8 @@ class RuntimeCreateOpLowering : public ConvertOpToLLVMPattern<RuntimeCreateOp> {
         auto i64 = rewriter.getI64Type();
 
         auto storedType = converter->convertType(valueType.getValueType());
-        auto storagePtrType = getTypeConverter()->getPointerType(storedType);
+        auto storagePtrType =
+            AsyncAPI::opaquePointerType(rewriter.getContext());
 
         // %Size = getelementptr %T* null, int 1
         // %SizeI = ptrtoint %T* %Size to i64
@@ -846,12 +774,10 @@ class RuntimeAwaitAndResumeOpLowering
     Value handle = adaptor.getHandle();
 
     // A pointer to coroutine resume intrinsic wrapper.
-    addResumeFunction(op->getParentOfType<ModuleOp>(),
-                      getTypeConverter()->useOpaquePointers());
-    auto resumeFnTy = AsyncAPI::resumeFunctionType(
-        op->getContext(), getTypeConverter()->useOpaquePointers());
+    addResumeFunction(op->getParentOfType<ModuleOp>());
     auto resumePtr = rewriter.create<LLVM::AddressOfOp>(
-        op->getLoc(), getTypeConverter()->getPointerType(resumeFnTy), kResume);
+        op->getLoc(), AsyncAPI::opaquePointerType(rewriter.getContext()),
+        kResume);
 
     rewriter.create<func::CallOp>(
         op->getLoc(), apiFuncName, TypeRange(),
@@ -877,12 +803,10 @@ class RuntimeResumeOpLowering
   matchAndRewrite(RuntimeResumeOp op, OpAdaptor adaptor,
                   ConversionPatternRewriter &rewriter) const override {
     // A pointer to coroutine resume intrinsic wrapper.
-    addResumeFunction(op->getParentOfType<ModuleOp>(),
-                      getTypeConverter()->useOpaquePointers());
-    auto resumeFnTy = AsyncAPI::resumeFunctionType(
-        op->getContext(), getTypeConverter()->useOpaquePointers());
+    addResumeFunction(op->getParentOfType<ModuleOp>());
     auto resumePtr = rewriter.create<LLVM::AddressOfOp>(
-        op->getLoc(), getTypeConverter()->getPointerType(resumeFnTy), kResume);
+        op->getLoc(), AsyncAPI::opaquePointerType(rewriter.getContext()),
+        kResume);
 
     // Call async runtime API to execute a coroutine in the managed thread.
     auto coroHdl = adaptor.getHandle();
@@ -909,8 +833,7 @@ class RuntimeStoreOpLowering : public ConvertOpToLLVMPattern<RuntimeStoreOp> {
     Location loc = op->getLoc();
 
     // Get a pointer to the async value storage from the runtime.
-    auto ptrType = AsyncAPI::opaquePointerType(
-        rewriter.getContext(), getTypeConverter()->useOpaquePointers());
+    auto ptrType = AsyncAPI::opaquePointerType(rewriter.getContext());
     auto storage = adaptor.getStorage();
     auto storagePtr = rewriter.create<func::CallOp>(
         loc, kGetValueStorage, TypeRange(ptrType), storage);
@@ -923,11 +846,6 @@ class RuntimeStoreOpLowering : public ConvertOpToLLVMPattern<RuntimeStoreOp> {
           op, "failed to convert stored value type to LLVM type");
 
     Value castedStoragePtr = storagePtr.getResult(0);
-    if (!getTypeConverter()->useOpaquePointers())
-      castedStoragePtr = rewriter.create<LLVM::BitcastOp>(
-          loc, getTypeConverter()->getPointerType(llvmValueType),
-          castedStoragePtr);
-
     // Store the yielded value into the async value storage.
     auto value = adaptor.getValue();
     rewriter.create<LLVM::StoreOp>(loc, value, castedStoragePtr);
@@ -955,8 +873,7 @@ class RuntimeLoadOpLowering : public ConvertOpToLLVMPattern<RuntimeLoadOp> {
     Location loc = op->getLoc();
 
     // Get a pointer to the async value storage from the runtime.
-    auto ptrType = AsyncAPI::opaquePointerType(
-        rewriter.getContext(), getTypeConverter()->useOpaquePointers());
+    auto ptrType = AsyncAPI::opaquePointerType(rewriter.getContext());
     auto storage = adaptor.getStorage();
     auto storagePtr = rewriter.create<func::CallOp>(
         loc, kGetValueStorage, TypeRange(ptrType), storage);
@@ -969,10 +886,6 @@ class RuntimeLoadOpLowering : public ConvertOpToLLVMPattern<RuntimeLoadOp> {
           op, "failed to convert loaded value type to LLVM type");
 
     Value castedStoragePtr = storagePtr.getResult(0);
-    if (!getTypeConverter()->useOpaquePointers())
-      castedStoragePtr = rewriter.create<LLVM::BitcastOp>(
-          loc, getTypeConverter()->getPointerType(llvmValueType),
-          castedStoragePtr);
 
     // Load from the casted pointer.
     rewriter.replaceOpWithNewOp<LLVM::LoadOp>(op, llvmValueType,
@@ -1115,12 +1028,11 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
   MLIRContext *ctx = module->getContext();
 
   LowerToLLVMOptions options(ctx);
-  options.useOpaquePointers = useOpaquePointers;
 
   // Add declarations for most functions required by the coroutines lowering.
   // We delay adding the resume function until it's needed because it currently
   // fails to compile unless '-O0' is specified.
-  addAsyncRuntimeApiDeclarations(module, useOpaquePointers);
+  addAsyncRuntimeApiDeclarations(module);
 
   // Lower async.runtime and async.coro operations to Async Runtime API and
   // LLVM coroutine intrinsics.
@@ -1133,8 +1045,7 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
   // operations.
   LLVMTypeConverter llvmConverter(ctx, options);
   llvmConverter.addConversion([&](Type type) {
-    return AsyncRuntimeTypeConverter::convertAsyncTypes(
-        type, llvmConverter.useOpaquePointers());
+    return AsyncRuntimeTypeConverter::convertAsyncTypes(type);
   });
 
   // Convert async types in function signatures and function calls.
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
index 8a611cf96f5b5f8..a398bc5710a865c 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-coro-to-llvm.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-async-to-llvm='use-opaque-pointers=1' | FileCheck %s
+// RUN: mlir-opt %s -convert-async-to-llvm | FileCheck %s
 
 // CHECK-LABEL: @coro_id
 func.func @coro_id() {
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
index 3672be91bbc07ad..4077edc7420dca1 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -convert-async-to-llvm='use-opaque-pointers=1' | FileCheck %s --dump-input=always
+// RUN: mlir-opt %s -convert-async-to-llvm | FileCheck %s --dump-input=always
 
 // CHECK-LABEL: @create_token
 func.func @create_token() {
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
index fd419dc95e7a1aa..dd54bdb79872441 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
@@ -1,4 +1,4 @@
-// RUN: mlir-opt %s -split-input-file -async-to-async-runtime -convert-async-to-llvm='use-opaque-pointers=1' | FileCheck %s
+// RUN: mlir-opt %s -split-input-file -async-to-async-runtime -convert-async-to-llvm | FileCheck %s
 
 // CHECK-LABEL: reference_counting
 func.func @reference_counting(%arg0: !async.token) {
diff --git a/mlir/test/Conversion/AsyncToLLVM/typed-pointers.mlir b/mlir/test/Conversion/AsyncToLLVM/typed-pointers.mlir
deleted file mode 100644
index 07cd2add3b15122..000000000000000
--- a/mlir/test/Conversion/AsyncToLLVM/typed-pointers.mlir
+++ /dev/null
@@ -1,138 +0,0 @@
-// RUN: mlir-opt %s -split-input-file -async-to-async-runtime -convert-async-to-llvm='use-opaque-pointers=0' | FileCheck %s
-
-
-
-// CHECK-LABEL: @store
-func.func @store() {
-  // CHECK: %[[CST:.*]] = arith.constant 1.0
-  %0 = arith.constant 1.0 : f32
-  // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
-  %1 = async.runtime.create : !async.value<f32>
-  // CHECK: %[[P0:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
-  // CHECK: %[[P1:.*]] = llvm.bitcast %[[P0]] : !llvm.ptr<i8> to !llvm.ptr<f32>
-  // CHECK: llvm.store %[[CST]], %[[P1]]
-  async.runtime.store %0, %1 : !async.value<f32>
-  return
-}
-
-// CHECK-LABEL: @load
-func.func @load() -> f32 {
-  // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
-  %0 = async.runtime.create : !async.value<f32>
-  // CHECK: %[[P0:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
-  // CHECK: %[[P1:.*]] = llvm.bitcast %[[P0]] : !llvm.ptr<i8> to !llvm.ptr<f32>
-  // CHECK: %[[VALUE:.*]] = llvm.load %[[P1]]
-  %1 = async.runtime.load %0 : !async.value<f32>
-  // CHECK: return %[[VALUE]] : f32
-  return %1 : f32
-}
-
-// -----
-
-// CHECK-LABEL: execute_no_async_args
-func.func @execute_no_async_args(%arg0: f32, %arg1: memref<1xf32>) {
-  // CHECK: %[[TOKEN:.*]] = call @async_execute_fn(%arg0, %arg1)
-  %token = async.execute {
-    %c0 = arith.constant 0 : index
-    memref.store %arg0, %arg1[%c0] : memref<1xf32>
-    async.yield
-  }
-  // CHECK: call @mlirAsyncRuntimeAwaitToken(%[[TOKEN]])
-  // CHECK: %[[IS_ERROR:.*]] = call @mlirAsyncRuntimeIsTokenError(%[[TOKEN]])
-  // CHECK: %[[TRUE:.*]] = arith.constant true
-  // CHECK: %[[NOT_ERROR:.*]] = arith.xori %[[IS_ERROR]], %[[TRUE]] : i1
-  // CHECK: cf.assert %[[NOT_ERROR]]
-  // CHECK-NEXT: return
-  async.await %token : !async.token
-  return
-}
-
-// Function outlined from the async.execute operation.
-// CHECK-LABEL: func private @async_execute_fn(%arg0: f32, %arg1: memref<1xf32>)
-// CHECK-SAME: -> !llvm.ptr<i8>
-
-// Create token for return op, and mark a function as a coroutine.
-// CHECK: %[[RET:.*]] = call @mlirAsyncRuntimeCreateToken()
-// CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
-
-// Pass a suspended coroutine to the async runtime.
-// CHECK: %[[STATE:.*]] = llvm.intr.coro.save
-// CHECK: %[[RESUME:.*]] = llvm.mlir.addressof @__resume
-// CHECK: call @mlirAsyncRuntimeExecute(%[[HDL]], %[[RESUME]])
-// CHECK: %[[SUSPENDED:.*]] = llvm.intr.coro.suspend %[[STATE]]
-
-// Decide the next block based on the code returned from suspend.
-// CHECK: %[[SEXT:.*]] = llvm.sext %[[SUSPENDED]] : i8 to i32
-// CHECK: llvm.switch %[[SEXT]] : i32, ^[[SUSPEND:[b0-9]+]]
-// CHECK-NEXT: 0: ^[[RESUME:[b0-9]+]]
-// CHECK-NEXT: 1: ^[[CLEANUP:[b0-9]+]]
-
-// Resume coroutine after suspension.
-// CHECK: ^[[RESUME]]:
-// CHECK: memref.store %arg0, %arg1[%c0] : memref<1xf32>
-// CHECK: call @mlirAsyncRuntimeEmplaceToken(%[[RET]])
-
-// Delete coroutine.
-// CHECK: ^[[CLEANUP]]:
-// CHECK: %[[MEM:.*]] = llvm.intr.coro.free
-// CHECK: llvm.call @free(%[[MEM]])
-
-// Suspend coroutine, and also a return statement for ramp function.
-// CHECK: ^[[SUSPEND]]:
-// CHECK: llvm.intr.coro.end
-// CHECK: return %[[RET]]
-
-// -----
-
-// CHECK-LABEL: execute_and_return_f32
-func.func @execute_and_return_f32() -> f32 {
- // CHECK: %[[RET:.*]]:2 = call @async_execute_fn
-  %token, %result = async.execute -> !async.value<f32> {
-    %c0 = arith.constant 123.0 : f32
-    async.yield %c0 : f32
-  }
-
-  // CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[RET]]#1)
-  // CHECK: %[[ST_F32:.*]] = llvm.bitcast %[[STORAGE]]
-  // CHECK: %[[LOADED:.*]] = llvm.load %[[ST_F32]] :  !llvm.ptr<f32>
-  %0 = async.await %result : !async.value<f32>
-
-  return %0 : f32
-}
-
-// Function outlined from the async.execute operation.
-// CHECK-LABEL: func private @async_execute_fn()
-// CHECK: %[[TOKEN:.*]] = call @mlirAsyncRuntimeCreateToken()
-// CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
-// CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
-
-// Suspend coroutine in the beginning.
-// CHECK: call @mlirAsyncRuntimeExecute(%[[HDL]],
-// CHECK: llvm.intr.coro.suspend
-
-// Emplace result value.
-// CHECK: %[[CST:.*]] = arith.constant 1.230000e+02 : f32
-// CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
-// CHECK: %[[ST_F32:.*]] = llvm.bitcast %[[STORAGE]]
-// CHECK: llvm.store %[[CST]], %[[ST_F32]] : !llvm.ptr<f32>
-// CHECK: call @mlirAsyncRuntimeEmplaceValue(%[[VALUE]])
-
-// Emplace result token.
-// CHECK: call @mlirAsyncRuntimeEmplaceToken(%[[TOKEN]])
-
-// -----
-
-// CHECK-LABEL: @await_and_resume_group
-func.func @await_and_resume_group() {
-  %c = arith.constant 1 : index
-  %0 = async.coro.id
-  // CHECK: %[[HDL:.*]] = llvm.intr.coro.begin
-  %1 = async.coro.begin %0
-  // CHECK: %[[TOKEN:.*]] = call @mlirAsyncRuntimeCreateGroup
-  %2 = async.runtime.create_group %c : !async.group
-  // CHECK: %[[RESUME:.*]] = llvm.mlir.addressof @__resume
-  // CHECK: call @mlirAsyncRuntimeAwaitAllInGroupAndExecute
-  // CHECK-SAME: (%[[TOKEN]], %[[HDL]], %[[RESUME]])
-  async.runtime.await_and_resume %2, %1 : !async.group
-  return
-}


