[Mlir-commits] [mlir] 856056d - [mlir][LLVMIR] Add support for va_start/copy/end intrinsics
Min-Yih Hsu
llvmlistbot at llvm.org
Mon Jun 27 09:47:49 PDT 2022
Author: Min-Yih Hsu
Date: 2022-06-27T09:46:40-07:00
New Revision: 856056d1b0b34be3f72848dd28b32a800f551ad5
URL: https://github.com/llvm/llvm-project/commit/856056d1b0b34be3f72848dd28b32a800f551ad5
DIFF: https://github.com/llvm/llvm-project/commit/856056d1b0b34be3f72848dd28b32a800f551ad5.diff
LOG: [mlir][LLVMIR] Add support for va_start/copy/end intrinsics
This patch adds three new LLVM intrinsic operations: llvm.intr.vastart/copy/end,
along with their translation from LLVM IR.
This effectively removes a restriction, imposed by 0126dcf1f0a1, where
non-external functions in LLVM dialect cannot be variadic. At that time
it was not clear how LLVM intrinsics are going to be modeled, which
indirectly affects va_start/copy/end, the core intrinsics used in
variadic functions. But since we have LLVM intrinsics as normal
MLIR operations, it's not a problem anymore.
Differential Revision: https://reviews.llvm.org/D127540
Added:
mlir/test/mlir-cpu-runner/x86-varargs.mlir
Modified:
mlir/docs/TargetLLVMIR.md
mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
mlir/test/Dialect/LLVMIR/func.mlir
mlir/test/Dialect/LLVMIR/roundtrip.mlir
mlir/test/Target/LLVMIR/Import/basic.ll
mlir/test/Target/LLVMIR/Import/intrinsic.ll
mlir/test/Target/LLVMIR/llvmir.mlir
mlir/test/mlir-cpu-runner/lit.local.cfg
Removed:
################################################################################
diff --git a/mlir/docs/TargetLLVMIR.md b/mlir/docs/TargetLLVMIR.md
index bd36847c23347..2c1d222f406ef 100644
--- a/mlir/docs/TargetLLVMIR.md
+++ b/mlir/docs/TargetLLVMIR.md
@@ -182,6 +182,8 @@ Function types are converted to LLVM dialect function types as follows:
individual pointers;
- the conversion of `memref`-typed arguments is subject to
[calling conventions](TargetLLVMIR.md#calling-conventions).
+- if a function type has boolean attribute `func.varargs` being set, the
+ converted LLVM function will be variadic.
Examples:
@@ -252,6 +254,11 @@ Examples:
// potentially with other non-memref typed results.
!llvm.func<struct<(struct<(ptr<f32>, ptr<f32>, i64)>,
struct<(ptr<double>, ptr<double>, i64)>)> ()>
+
+// If "func.varargs" attribute is set:
+(i32) -> () attributes { "func.varargs" = true }
+// the corresponding LLVM function will be variadic:
+!llvm.func<void (i32, ...)>
```
Conversion patterns are available to convert built-in function operations and
@@ -747,6 +754,18 @@ which introduces significant overhead. In such situations, auxiliary interface
functions are executed on host and only pass the values through device function
invocation mechanism.
+Limitation: Right now we cannot generate a C interface for variadic functions,
+regardless of whether they are external or not, because C functions are unable
+to "forward" variadic arguments like this:
+```c
+void bar(int, ...);
+
+void foo(int x, ...) {
+ // ERROR: no way to forward variadic arguments.
+ void bar(x, ...);
+}
+```
+
### Address Computation
Accesses to a memref element are transformed into an access to an element of the
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
index 8427eb18dd423..c7e947b622fb4 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMIntrinsicOps.td
@@ -186,6 +186,28 @@ def LLVM_CoroResumeOp : LLVM_IntrOp<"coro.resume", [], [], [], 0> {
let assemblyFormat = "$handle attr-dict";
}
+//
+// Variadic function intrinsics.
+//
+
+def LLVM_VaStartOp : LLVM_ZeroResultIntrOp<"vastart">,
+ Arguments<(ins LLVM_i8Ptr:$arg_list)> {
+ let assemblyFormat = "$arg_list attr-dict";
+ let summary = "Initializes `arg_list` for subsequent variadic argument extractions.";
+}
+
+def LLVM_VaCopyOp : LLVM_ZeroResultIntrOp<"vacopy">,
+ Arguments<(ins LLVM_i8Ptr:$dest_list, LLVM_i8Ptr:$src_list)> {
+ let assemblyFormat = "$src_list `to` $dest_list attr-dict";
+ let summary = "Copies the current argument position from `src_list` to `dest_list`.";
+}
+
+def LLVM_VaEndOp : LLVM_ZeroResultIntrOp<"vaend">,
+ Arguments<(ins LLVM_i8Ptr:$arg_list)> {
+ let assemblyFormat = "$arg_list attr-dict";
+ let summary = "Destroys `arg_list`, which has been initialized by `intr.vastart` or `intr.vacopy`.";
+}
+
//
// Exception handling intrinsics.
//
diff --git a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
index 43673f1cfa43b..53da615981c3f 100644
--- a/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
+++ b/mlir/lib/Conversion/FuncToLLVM/FuncToLLVM.cpp
@@ -373,6 +373,10 @@ struct FuncOpConversion : public FuncOpConversionBase {
if (funcOp->getAttrOfType<UnitAttr>(
LLVM::LLVMDialect::getEmitCWrapperAttrName())) {
+ if (newFuncOp.isVarArg())
+ return funcOp->emitError("C interface for variadic functions is not "
+ "supported yet.");
+
if (newFuncOp.isExternal())
wrapExternalFunction(rewriter, funcOp.getLoc(), *getTypeConverter(),
funcOp, newFuncOp);
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 63a89d5ec10e5..8bc63fc71fe8c 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -2132,7 +2132,6 @@ LogicalResult ShuffleVectorOp::verify() {
// Add the entry block to the function.
Block *LLVMFuncOp::addEntryBlock() {
assert(empty() && "function already has an entry block");
- assert(!isVarArg() && "unimplemented: non-external variadic functions");
auto *entry = new Block;
push_back(entry);
@@ -2331,9 +2330,6 @@ LogicalResult LLVMFuncOp::verify() {
return success();
}
- if (isVarArg())
- return emitOpError("only external functions can be variadic");
-
return success();
}
diff --git a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
index 223f2fdc23a77..0aa185fe25b91 100644
--- a/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/convert-funcs.mlir
@@ -62,6 +62,17 @@ func.func @indirect_call(%arg0: (f32) -> i32, %arg1: f32) -> i32 {
return %0 : i32
}
+func.func @variadic_func(%arg0: i32) attributes { "func.varargs" = true } {
+ return
+}
+
// -----
func.func private @badllvmlinkage(i32) attributes { "llvm.linkage" = 3 : i64 } // expected-error {{Contains llvm.linkage attribute not of type LLVM::LinkageAttr}}
+
+// -----
+
+// expected-error at +1{{C interface for variadic functions is not supported yet.}}
+func.func @variadic_func(%arg0: i32) attributes { "func.varargs" = true, "llvm.emit_c_interface" } {
+ return
+}
diff --git a/mlir/test/Dialect/LLVMIR/func.mlir b/mlir/test/Dialect/LLVMIR/func.mlir
index 852ff1f6191f6..b19846eaca7df 100644
--- a/mlir/test/Dialect/LLVMIR/func.mlir
+++ b/mlir/test/Dialect/LLVMIR/func.mlir
@@ -159,6 +159,11 @@ module {
llvm.func weak fastcc @cconv3() {
llvm.return
}
+
+ // CHECK-LABEL: llvm.func @variadic_def
+ llvm.func @variadic_def(...) {
+ llvm.return
+ }
}
// -----
@@ -232,15 +237,6 @@ module {
// -----
-module {
- // expected-error at +1 {{only external functions can be variadic}}
- llvm.func @variadic_def(...) {
- llvm.return
- }
-}
-
-// -----
-
module {
// expected-error at +1 {{cannot attach result attributes to functions with a void return}}
llvm.func @variadic_def() -> (!llvm.void {llvm.noalias})
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index 9af27d4d1d39a..1c9c78dc6cfd9 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -496,3 +496,29 @@ module {
llvm.return
}
}
+
+// CHECK-LABEL: llvm.func @vararg_func
+llvm.func @vararg_func(%arg0: i32, ...) {
+ // CHECK: %{{.*}} = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %{{.*}} = llvm.mlir.constant(1 : i32) : i32
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ %1 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA0:.+]] = llvm.alloca %{{.*}} x !llvm.struct<"struct.va_list", (ptr<i8>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>>
+ // CHECK: %[[CAST0:.+]] = llvm.bitcast %[[ALLOCA0]] : !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>> to !llvm.ptr<i8>
+ %2 = llvm.alloca %1 x !llvm.struct<"struct.va_list", (ptr<i8>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>>
+ %3 = llvm.bitcast %2 : !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>> to !llvm.ptr<i8>
+ // CHECK: llvm.intr.vastart %[[CAST0]]
+ llvm.intr.vastart %3
+ // CHECK: %[[ALLOCA1:.+]] = llvm.alloca %{{.*}} x !llvm.ptr<i8> {alignment = 8 : i64} : (i32) -> !llvm.ptr<ptr<i8>>
+ // CHECK: %[[CAST1:.+]] = llvm.bitcast %[[ALLOCA1]] : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
+ %4 = llvm.alloca %0 x !llvm.ptr<i8> {alignment = 8 : i64} : (i32) -> !llvm.ptr<ptr<i8>>
+ %5 = llvm.bitcast %4 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
+ // CHECK: llvm.intr.vacopy %[[CAST0]] to %[[CAST1]]
+ llvm.intr.vacopy %3 to %5
+ // CHECK: llvm.intr.vaend %[[CAST1]]
+ // CHECK: llvm.intr.vaend %[[CAST0]]
+ llvm.intr.vaend %5
+ llvm.intr.vaend %3
+ // CHECK: llvm.return
+ llvm.return
+}
diff --git a/mlir/test/Target/LLVMIR/Import/basic.ll b/mlir/test/Target/LLVMIR/Import/basic.ll
index 3691448eb6ee1..c26609f255794 100644
--- a/mlir/test/Target/LLVMIR/Import/basic.ll
+++ b/mlir/test/Target/LLVMIR/Import/basic.ll
@@ -629,3 +629,34 @@ define void @unreachable_inst() {
; CHECK: llvm.unreachable
unreachable
}
+
; Variadic function definition
+%struct.va_list = type { i8* }
+
+declare void @llvm.va_start(i8*)
+declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_end(i8*)
+
+; CHECK-LABEL: llvm.func @variadic_function
+define void @variadic_function(i32 %X, ...) {
+ ; CHECK: %[[ALLOCA0:.+]] = llvm.alloca %{{.*}} x !llvm.struct<"struct.va_list", (ptr<i8>)> {{.*}} : (i32) -> !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>>
+ %ap = alloca %struct.va_list
+ ; CHECK: %[[CAST0:.+]] = llvm.bitcast %[[ALLOCA0]] : !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>> to !llvm.ptr<i8>
+ %ap2 = bitcast %struct.va_list* %ap to i8*
+ ; CHECK: llvm.intr.vastart %[[CAST0]]
+ call void @llvm.va_start(i8* %ap2)
+
+ ; CHECK: %[[ALLOCA1:.+]] = llvm.alloca %{{.*}} x !llvm.ptr<i8> {{.*}} : (i32) -> !llvm.ptr<ptr<i8>>
+ %aq = alloca i8*
+ ; CHECK: %[[CAST1:.+]] = llvm.bitcast %[[ALLOCA1]] : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
+ %aq2 = bitcast i8** %aq to i8*
+ ; CHECK: llvm.intr.vacopy %[[CAST0]] to %[[CAST1]]
+ call void @llvm.va_copy(i8* %aq2, i8* %ap2)
+ ; CHECK: llvm.intr.vaend %[[CAST1]]
+ call void @llvm.va_end(i8* %aq2)
+
+ ; CHECK: llvm.intr.vaend %[[CAST0]]
+ call void @llvm.va_end(i8* %ap2)
+ ; CHECK: llvm.return
+ ret void
+}
diff --git a/mlir/test/Target/LLVMIR/Import/intrinsic.ll b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
index 2fa1283ef2ca0..7d0351d81d96e 100644
--- a/mlir/test/Target/LLVMIR/Import/intrinsic.ll
+++ b/mlir/test/Target/LLVMIR/Import/intrinsic.ll
@@ -385,6 +385,17 @@ define void @umul_with_overflow_test(i32 %0, i32 %1, <8 x i32> %2, <8 x i32> %3)
ret void
}
+; CHECK-LABEL: llvm.func @va_intrinsics_test
+define void @va_intrinsics_test(i8* %0, i8* %1) {
+; CHECK: llvm.intr.vastart %{{.*}}
+ call void @llvm.va_start(i8* %0)
+; CHECK: llvm.intr.vacopy %{{.*}} to %{{.*}}
+ call void @llvm.va_copy(i8* %1, i8* %0)
+; CHECK: llvm.intr.vaend %{{.*}}
+ call void @llvm.va_end(i8* %0)
+ ret void
+}
+
; CHECK-LABEL: llvm.func @coro_id
define void @coro_id(i32 %0, i8* %1) {
; CHECK: llvm.intr.coro.id %{{.*}}, %{{.*}}, %{{.*}}, %{{.*}} : !llvm.token
@@ -686,6 +697,9 @@ declare void @llvm.coro.resume(i8*)
declare i32 @llvm.eh.typeid.for(i8*)
declare i8* @llvm.stacksave()
declare void @llvm.stackrestore(i8*)
+declare void @llvm.va_start(i8*)
+declare void @llvm.va_copy(i8*, i8*)
+declare void @llvm.va_end(i8*)
declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.sub.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
declare <8 x i32> @llvm.vp.mul.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index a5b4bd65a58f2..9a30a9d4a719a 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -1909,3 +1909,31 @@ llvm.func @duplicate_block_with_args_in_switch(%cond : i32, %arg1: f32, %arg2: f
llvm.func @bar(f32)
llvm.func @baz()
llvm.func @qux(f32)
+
+// -----
+
+// Variadic function definition
+
+// CHECK: %struct.va_list = type { ptr }
+
+// CHECK: define void @vararg_function(i32 %{{.*}}, ...)
+llvm.func @vararg_function(%arg0: i32, ...) {
+ %0 = llvm.mlir.constant(1 : i32) : i32
+ %1 = llvm.mlir.constant(1 : i32) : i32
+ // CHECK: %[[ALLOCA0:.+]] = alloca %struct.va_list, align 8
+ %2 = llvm.alloca %1 x !llvm.struct<"struct.va_list", (ptr<i8>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>>
+ %3 = llvm.bitcast %2 : !llvm.ptr<struct<"struct.va_list", (ptr<i8>)>> to !llvm.ptr<i8>
+ // CHECK: call void @llvm.va_start(ptr %[[ALLOCA0]])
+ llvm.intr.vastart %3
+ // CHECK: %[[ALLOCA1:.+]] = alloca ptr, align 8
+ %4 = llvm.alloca %0 x !llvm.ptr<i8> {alignment = 8 : i64} : (i32) -> !llvm.ptr<ptr<i8>>
+ %5 = llvm.bitcast %4 : !llvm.ptr<ptr<i8>> to !llvm.ptr<i8>
+ // CHECK: call void @llvm.va_copy(ptr %[[ALLOCA1]], ptr %[[ALLOCA0]])
+ llvm.intr.vacopy %3 to %5
+ // CHECK: call void @llvm.va_end(ptr %[[ALLOCA1]])
+ // CHECK: call void @llvm.va_end(ptr %[[ALLOCA0]])
+ llvm.intr.vaend %5
+ llvm.intr.vaend %3
+ // CHECK: ret void
+ llvm.return
+}
diff --git a/mlir/test/mlir-cpu-runner/lit.local.cfg b/mlir/test/mlir-cpu-runner/lit.local.cfg
index 5951990786b68..5df5b0a38bc6c 100644
--- a/mlir/test/mlir-cpu-runner/lit.local.cfg
+++ b/mlir/test/mlir-cpu-runner/lit.local.cfg
@@ -12,3 +12,5 @@ if 'msan' in config.available_features:
if 'native' not in config.available_features:
config.unsupported = True
+config.available_features.add(
+ config.root.native_target.lower() + '-native-target')
diff --git a/mlir/test/mlir-cpu-runner/x86-varargs.mlir b/mlir/test/mlir-cpu-runner/x86-varargs.mlir
new file mode 100644
index 0000000000000..be33c31b13544
--- /dev/null
+++ b/mlir/test/mlir-cpu-runner/x86-varargs.mlir
@@ -0,0 +1,66 @@
+// RUN: mlir-cpu-runner %s -e caller --entry-point-result=i32 | FileCheck %s
+// Variadic argument list (va_list) and the extraction logic are ABI-specific.
+// REQUIRES: x86-native-target
+
+// Check if variadic functions can be called and the correct variadic argument
+// can be extracted.
+
+llvm.func @caller() -> i32 {
+ %0 = llvm.mlir.constant(3 : i32) : i32
+ %1 = llvm.mlir.constant(2 : i32) : i32
+ %2 = llvm.mlir.constant(1 : i32) : i32
+ %3 = llvm.call @foo(%2, %1, %0) : (i32, i32, i32) -> i32
+ llvm.return %3 : i32
+}
+
+// Equivalent C code:
+// int foo(int X, ...) {
+// va_list args;
+// va_start(args, X);
+// int num = va_arg(args, int);
+// va_end(args);
+// return num;
+//}
+llvm.func @foo(%arg0: i32, ...) -> i32 {
+ %0 = llvm.mlir.constant(8 : i64) : i64
+ %1 = llvm.mlir.constant(2 : i32) : i32
+ %2 = llvm.mlir.constant(0 : i64) : i64
+ %3 = llvm.mlir.constant(0 : i64) : i64
+ %4 = llvm.mlir.constant(8 : i32) : i32
+ %5 = llvm.mlir.constant(3 : i32) : i32
+ %6 = llvm.mlir.constant(0 : i64) : i64
+ %7 = llvm.mlir.constant(0 : i64) : i64
+ %8 = llvm.mlir.constant(41 : i32) : i32
+ %9 = llvm.mlir.constant(0 : i32) : i32
+ %10 = llvm.mlir.constant(0 : i64) : i64
+ %11 = llvm.mlir.constant(0 : i64) : i64
+ %12 = llvm.mlir.constant(1 : i32) : i32
+ %13 = llvm.alloca %12 x !llvm.array<1 x struct<"struct.va_list", (i32, i32, ptr<i8>, ptr<i8>)>> {alignment = 8 : i64} : (i32) -> !llvm.ptr<array<1 x struct<"struct.va_list", (i32, i32, ptr<i8>, ptr<i8>)>>>
+ %14 = llvm.bitcast %13 : !llvm.ptr<array<1 x struct<"struct.va_list", (i32, i32, ptr<i8>, ptr<i8>)>>> to !llvm.ptr<i8>
+ llvm.intr.vastart %14
+ %15 = llvm.getelementptr %13[%11, %10, 0] : (!llvm.ptr<array<1 x struct<"struct.va_list", (i32, i32, ptr<i8>, ptr<i8>)>>>, i64, i64) -> !llvm.ptr<i32>
+ %16 = llvm.load %15 : !llvm.ptr<i32>
+ %17 = llvm.icmp "ult" %16, %8 : i32
+ llvm.cond_br %17, ^bb1, ^bb2
+^bb1: // pred: ^bb0
+ %18 = llvm.getelementptr %13[%7, %6, 3] : (!llvm.ptr<array<1 x struct<"struct.va_list", (i32, i32, ptr<i8>, ptr<i8>)>>>, i64, i64) -> !llvm.ptr<ptr<i8>>
+ %19 = llvm.load %18 : !llvm.ptr<ptr<i8>>
+ %20 = llvm.zext %16 : i32 to i64
+ %21 = llvm.getelementptr %19[%20] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
+ %22 = llvm.add %16, %4 : i32
+ llvm.store %22, %15 : !llvm.ptr<i32>
+ llvm.br ^bb3(%21 : !llvm.ptr<i8>)
+^bb2: // pred: ^bb0
+ %23 = llvm.getelementptr %13[%3, %2, 2] : (!llvm.ptr<array<1 x struct<"struct.va_list", (i32, i32, ptr<i8>, ptr<i8>)>>>, i64, i64) -> !llvm.ptr<ptr<i8>>
+ %24 = llvm.load %23 : !llvm.ptr<ptr<i8>>
+ %25 = llvm.getelementptr %24[%0] : (!llvm.ptr<i8>, i64) -> !llvm.ptr<i8>
+ llvm.store %25, %23 : !llvm.ptr<ptr<i8>>
+ llvm.br ^bb3(%24 : !llvm.ptr<i8>)
+^bb3(%26: !llvm.ptr<i8>): // 2 preds: ^bb1, ^bb2
+ %27 = llvm.bitcast %26 : !llvm.ptr<i8> to !llvm.ptr<i32>
+ %28 = llvm.load %27 : !llvm.ptr<i32>
+ llvm.intr.vaend %14
+ llvm.return %28 : i32
+}
+
+// CHECK: 2
More information about the Mlir-commits
mailing list