[Mlir-commits] [mlir] [mlir][mesh] Mandatory Communicator (PR #133280)
Frank Schlimbach
llvmlistbot at llvm.org
Fri Mar 28 01:13:06 PDT 2025
Sergio Sánchez Ramírez,
Sergio Sánchez Ramírez,
Sergio Sánchez Ramírez,
Sergio Sánchez Ramírez,
Sergio Sánchez Ramírez,"Schlimbach, Frank"
<frank.schlimbach at intel.com>,"Schlimbach, Frank" <frank.schlimbach at intel.com>
,Frank Schlimbach <frank.schlimbach at intel.com>,Frank
Schlimbach <frank.schlimbach at intel.com>
Message-ID:
In-Reply-To: <llvm.org/llvm/llvm-project/pull/133280 at github.com>
https://github.com/fschlimb updated https://github.com/llvm/llvm-project/pull/133280
>From 99e37299bd27e8e05d1d97182cab447a10795f0e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Ram=C3=ADrez?=
<sergio.sanchez.ramirez+git at bsc.es>
Date: Sat, 1 Feb 2025 20:43:30 +0100
Subject: [PATCH 01/10] Revert "Remove MPI_Comm type"
This reverts commit 6abba5a37d5ea73c2b177581db9d476da4a26c91.
---
mlir/include/mlir/Dialect/MPI/IR/MPIOps.td | 132 +++++++++++++------
mlir/include/mlir/Dialect/MPI/IR/MPITypes.td | 11 ++
mlir/test/Dialect/MPI/ops.mlir | 24 ++++
3 files changed, 127 insertions(+), 40 deletions(-)
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
index db28bd09678f8..3c6f5a8ac0ea8 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
@@ -37,26 +37,43 @@ def MPI_InitOp : MPI_Op<"init", []> {
let assemblyFormat = "attr-dict (`:` type($retval)^)?";
}
+//===----------------------------------------------------------------------===//
+// CommWorldOp
+//===----------------------------------------------------------------------===//
+
+def MPI_CommWorldOp : MPI_Op<"comm_world", []> {
+ let summary = "Get the World communicator, equivalent to `MPI_COMM_WORLD`";
+ let description = [{
+ This operation returns the predefined MPI_COMM_WORLD communicator.
+ }];
+
+ let results = (outs MPI_Comm : $comm);
+
+ let assemblyFormat = "attr-dict `:` type(results)";
+}
+
//===----------------------------------------------------------------------===//
// CommRankOp
//===----------------------------------------------------------------------===//
def MPI_CommRankOp : MPI_Op<"comm_rank", []> {
let summary = "Get the current rank, equivalent to "
- "`MPI_Comm_rank(MPI_COMM_WORLD, &rank)`";
+ "`MPI_Comm_rank(comm, &rank)`";
let description = [{
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
+ let arguments = (ins Optional<MPI_Comm> : $comm);
+
let results = (
outs Optional<MPI_Retval> : $retval,
I32 : $rank
);
- let assemblyFormat = "attr-dict `:` type(results)";
+ let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` type(results)";
}
//===----------------------------------------------------------------------===//
@@ -65,20 +82,50 @@ def MPI_CommRankOp : MPI_Op<"comm_rank", []> {
def MPI_CommSizeOp : MPI_Op<"comm_size", []> {
let summary = "Get the size of the group associated to the communicator, "
- "equivalent to `MPI_Comm_size(MPI_COMM_WORLD, &size)`";
+ "equivalent to `MPI_Comm_size(comm, &size)`";
let description = [{
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
+ let arguments = (ins Optional<MPI_Comm> : $comm);
+
let results = (
outs Optional<MPI_Retval> : $retval,
I32 : $size
);
- let assemblyFormat = "attr-dict `:` type(results)";
+ let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` type(results)";
+}
+
+//===----------------------------------------------------------------------===//
+// CommSplitOp
+//===----------------------------------------------------------------------===//
+
+def MPI_CommSplit : MPI_Op<"comm_split", []> {
+ let summary = "Partition the group associated to the given communicator into "
+ "disjoint subgroups";
+ let description = [{
+ This operation splits the communicator into multiple sub-communicators.
+ The color value determines the group of processes that will be part of the
+ new communicator. The key value determines the rank of the calling process
+ in the new communicator.
+
+ This operation can optionally return an `!mpi.retval` value that can be used
+ to check for errors.
+ }];
+
+ let arguments = (ins MPI_Comm : $comm, I32 : $color, I32 : $key);
+
+ let results = (
+ outs MPI_Comm : $newcomm,
+ Optional<MPI_Retval> : $retval
+ );
+
+ let assemblyFormat = "`(` $comm `,` $color `,` $key `)` attr-dict `:` "
+ "type(results)";
}
//===----------------------------------------------------------------------===//
@@ -87,13 +134,13 @@ def MPI_CommSizeOp : MPI_Op<"comm_size", []> {
def MPI_SendOp : MPI_Op<"send", []> {
let summary =
- "Equivalent to `MPI_Send(ptr, size, dtype, dest, tag, MPI_COMM_WORLD)`";
+ "Equivalent to `MPI_Send(ptr, size, dtype, dest, tag, comm)`";
let description = [{
MPI_Send performs a blocking send of `size` elements of type `dtype` to rank
`dest`. The `tag` value and communicator enables the library to determine
the matching of multiple sends and receives between the same ranks.
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
@@ -102,12 +149,13 @@ def MPI_SendOp : MPI_Op<"send", []> {
let arguments = (
ins AnyMemRef : $ref,
I32 : $tag,
- I32 : $dest
+ I32 : $dest,
+ Optional<MPI_Comm> : $comm
);
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "`(` $ref `,` $tag `,` $dest `)` attr-dict `:` "
+ let assemblyFormat = "`(` $ref `,` $tag `,` $dest (`,` $comm ^)? `)` attr-dict `:` "
"type($ref) `,` type($tag) `,` type($dest)"
"(`->` type($retval)^)?";
let hasCanonicalizer = 1;
@@ -119,14 +167,14 @@ def MPI_SendOp : MPI_Op<"send", []> {
def MPI_ISendOp : MPI_Op<"isend", []> {
let summary =
- "Equivalent to `MPI_Isend(ptr, size, dtype, dest, tag, MPI_COMM_WORLD)`";
+ "Equivalent to `MPI_Isend(ptr, size, dtype, dest, tag, comm)`";
let description = [{
MPI_Isend begins a non-blocking send of `size` elements of type `dtype` to
rank `dest`. The `tag` value and communicator enables the library to
determine the matching of multiple sends and receives between the same
ranks.
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
@@ -135,7 +183,8 @@ def MPI_ISendOp : MPI_Op<"isend", []> {
let arguments = (
ins AnyMemRef : $ref,
I32 : $tag,
- I32 : $rank
+ I32 : $rank,
+ Optional<MPI_Comm> : $comm
);
let results = (
@@ -143,9 +192,9 @@ def MPI_ISendOp : MPI_Op<"isend", []> {
MPI_Request : $req
);
- let assemblyFormat = "`(` $ref `,` $tag `,` $rank `)` attr-dict "
+ let assemblyFormat = "`(` $ref `,` $tag `,` $rank (`,` $comm ^)?`)` attr-dict "
"`:` type($ref) `,` type($tag) `,` type($rank) "
- "`->` type(results)";
+ "(`,` type($comm) ^)? `->` type(results)";
let hasCanonicalizer = 1;
}
@@ -154,15 +203,15 @@ def MPI_ISendOp : MPI_Op<"isend", []> {
//===----------------------------------------------------------------------===//
def MPI_RecvOp : MPI_Op<"recv", []> {
- let summary = "Equivalent to `MPI_Recv(ptr, size, dtype, source, tag, "
- "MPI_COMM_WORLD, MPI_STATUS_IGNORE)`";
+ let summary = "Equivalent to `MPI_Recv(ptr, size, dtype, dest, tag, "
+ "comm, MPI_STATUS_IGNORE)`";
let description = [{
MPI_Recv performs a blocking receive of `size` elements of type `dtype`
from rank `source`. The `tag` value and communicator enables the library to
determine the matching of multiple sends and receives between the same
ranks.
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
The MPI_Status is set to `MPI_STATUS_IGNORE`, as the status object
is not yet ported to MLIR.
@@ -172,14 +221,15 @@ def MPI_RecvOp : MPI_Op<"recv", []> {
let arguments = (
ins AnyMemRef : $ref,
- I32 : $tag, I32 : $source
+ I32 : $tag, I32 : $source,
+ Optional<MPI_Comm> : $comm
);
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "`(` $ref `,` $tag `,` $source `)` attr-dict `:` "
- "type($ref) `,` type($tag) `,` type($source)"
- "(`->` type($retval)^)?";
+ let assemblyFormat = "`(` $ref `,` $tag `,` $source (`,` $comm ^)?`)` attr-dict"
+ " `:` type($ref) `,` type($tag) `,` type($source) "
+ "(`,` type($comm) ^)? (`->` type($retval)^)?";
let hasCanonicalizer = 1;
}
@@ -189,14 +239,14 @@ def MPI_RecvOp : MPI_Op<"recv", []> {
def MPI_IRecvOp : MPI_Op<"irecv", []> {
let summary = "Equivalent to `MPI_Irecv(ptr, size, dtype, dest, tag, "
- "MPI_COMM_WORLD, &req)`";
+ "comm, &req)`";
let description = [{
MPI_Irecv begins a non-blocking receive of `size` elements of type `dtype`
from rank `dest`. The `tag` value and communicator enables the library to
determine the matching of multiple sends and receives between the same
ranks.
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
@@ -205,7 +255,8 @@ def MPI_IRecvOp : MPI_Op<"irecv", []> {
let arguments = (
ins AnyMemRef : $ref,
I32 : $tag,
- I32 : $rank
+ I32 : $rank,
+ Optional<MPI_Comm> : $comm
);
let results = (
@@ -213,9 +264,9 @@ def MPI_IRecvOp : MPI_Op<"irecv", []> {
MPI_Request : $req
);
- let assemblyFormat = "`(` $ref `,` $tag `,` $rank `)` attr-dict `:`"
- "type($ref) `,` type($tag) `,` type($rank) `->`"
- "type(results)";
+ let assemblyFormat = "`(` $ref `,` $tag `,` $rank (`,` $comm ^)?`)` attr-dict "
+ "`:` type($ref) `,` type($tag) `,` type($rank)"
+ "(`,` type($comm) ^)? `->` type(results)";
let hasCanonicalizer = 1;
}
@@ -224,8 +275,7 @@ def MPI_IRecvOp : MPI_Op<"irecv", []> {
//===----------------------------------------------------------------------===//
def MPI_AllReduceOp : MPI_Op<"allreduce", []> {
- let summary = "Equivalent to `MPI_Allreduce(sendbuf, recvbuf, op, "
- "MPI_COMM_WORLD)`";
+ let summary = "Equivalent to `MPI_Allreduce(sendbuf, recvbuf, op, comm)`";
let description = [{
MPI_Allreduce performs a reduction operation on the values in the sendbuf
array and stores the result in the recvbuf array. The operation is
@@ -235,7 +285,7 @@ def MPI_AllReduceOp : MPI_Op<"allreduce", []> {
Currently only the `MPI_Op` predefined in the standard (e.g. `MPI_SUM`) are
supported.
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
@@ -244,14 +294,15 @@ def MPI_AllReduceOp : MPI_Op<"allreduce", []> {
let arguments = (
ins AnyMemRef : $sendbuf,
AnyMemRef : $recvbuf,
- MPI_OpClassAttr : $op
+ MPI_OpClassAttr : $op,
+ Optional<MPI_Comm> : $comm
);
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "`(` $sendbuf `,` $recvbuf `,` $op `)` attr-dict `:`"
- "type($sendbuf) `,` type($recvbuf)"
- "(`->` type($retval)^)?";
+ let assemblyFormat = "`(` $sendbuf `,` $recvbuf `,` $op (`,` $comm ^)?`)` "
+ "attr-dict `:` type($sendbuf) `,` type($recvbuf) "
+ "(`,` type($comm) ^)? (`->` type($retval)^)?";
}
//===----------------------------------------------------------------------===//
@@ -259,20 +310,22 @@ def MPI_AllReduceOp : MPI_Op<"allreduce", []> {
//===----------------------------------------------------------------------===//
def MPI_Barrier : MPI_Op<"barrier", []> {
- let summary = "Equivalent to `MPI_Barrier(MPI_COMM_WORLD)`";
+ let summary = "Equivalent to `MPI_Barrier(comm)`";
let description = [{
MPI_Barrier blocks execution until all processes in the communicator have
reached this routine.
- Communicators other than `MPI_COMM_WORLD` are not supported for now.
+ If communicator is not specified, `MPI_COMM_WORLD` is used by default.
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
+ let arguments = (ins Optional<MPI_Comm> : $comm);
+
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "attr-dict (`:` type($retval) ^)?";
+ let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict (`:` type($retval) ^)?";
}
//===----------------------------------------------------------------------===//
@@ -295,8 +348,7 @@ def MPI_Wait : MPI_Op<"wait", []> {
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "`(` $req `)` attr-dict `:` type($req) "
- "(`->` type($retval) ^)?";
+ let assemblyFormat = "`(` $req `)` attr-dict `:` type($req) (`->` type($retval) ^)?";
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPITypes.td b/mlir/include/mlir/Dialect/MPI/IR/MPITypes.td
index a55d30e778e22..b56a224d84774 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPITypes.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPITypes.td
@@ -40,6 +40,17 @@ def MPI_Retval : MPI_Type<"Retval", "retval"> {
}];
}
+//===----------------------------------------------------------------------===//
+// mpi::CommType
+//===----------------------------------------------------------------------===//
+
+def MPI_Comm : MPI_Type<"Comm", "comm"> {
+ let summary = "MPI communicator handler";
+ let description = [{
+ This type represents a handler to the MPI communicator.
+ }];
+}
+
//===----------------------------------------------------------------------===//
// mpi::RequestType
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/MPI/ops.mlir b/mlir/test/Dialect/MPI/ops.mlir
index f23a7e18a2ee9..f5bdb86be94c4 100644
--- a/mlir/test/Dialect/MPI/ops.mlir
+++ b/mlir/test/Dialect/MPI/ops.mlir
@@ -12,30 +12,48 @@ func.func @mpi_test(%ref : memref<100xf32>) -> () {
// CHECK-NEXT: %retval_0, %size = mpi.comm_size : !mpi.retval, i32
%retval_0, %size = mpi.comm_size : !mpi.retval, i32
+ // CHECK-NEXT: %comm = mpi.comm_world : !mpi.comm
+ %comm = mpi.comm_world : !mpi.comm
+
+ // CHECK-NEXT: %new_comm, %retval3 = mpi.comm_split(%comm, %rank, %rank) : i32, !mpi.retval
+ %new_comm, %retval3 = mpi.comm_split(%comm, %rank, %rank) : mpi.comm, i32, i32
+
// CHECK-NEXT: mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
mpi.send(%ref, %rank, %rank) : memref<100xf32>, i32, i32
// CHECK-NEXT: %1 = mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
%err2 = mpi.send(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ // CHECK-NEXT: mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
+ mpi.send(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
+
// CHECK-NEXT: mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
mpi.recv(%ref, %rank, %rank) : memref<100xf32>, i32, i32
// CHECK-NEXT: %2 = mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
%err3 = mpi.recv(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ // CHECK-NEXT: mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
+ mpi.recv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
+
// CHECK-NEXT: %req = mpi.isend(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
%req = mpi.isend(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
// CHECK-NEXT: %retval_1, %req_2 = mpi.isend(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
%err4, %req2 = mpi.isend(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
+ // CHECK-NEXT: %3 = mpi.isend(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, mpi.comm -> !mpi.request
+ %req1 = mpi.isend(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm -> !mpi.request
+
// CHECK-NEXT: %req_3 = mpi.irecv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
%req3 = mpi.irecv(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
// CHECK-NEXT: %retval_4, %req_5 = mpi.irecv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
%err5, %req4 = mpi.irecv(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
+ // CHECK-NEXT: %6 = mpi.irecv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, mpi.comm -> mpi.request
+ %req3 = mpi.irecv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm -> !mpi.request
+
// CHECK-NEXT: mpi.wait(%req) : !mpi.request
mpi.wait(%req) : !mpi.request
@@ -48,12 +66,18 @@ func.func @mpi_test(%ref : memref<100xf32>) -> () {
// CHECK-NEXT: %5 = mpi.barrier : !mpi.retval
%err7 = mpi.barrier : !mpi.retval
+ // CHECK-NEXT: mpi.barrier(%comm) : !mpi.retval
+ mpi.barrier(%comm) : !mpi.retval
+
// CHECK-NEXT: mpi.allreduce(%arg0, %arg0, <MPI_SUM>) : memref<100xf32>, memref<100xf32>
mpi.allreduce(%ref, %ref, <MPI_SUM>) : memref<100xf32>, memref<100xf32>
// CHECK-NEXT: mpi.allreduce(%arg0, %arg0, <MPI_SUM>) : memref<100xf32>, memref<100xf32> -> !mpi.retval
%err8 = mpi.allreduce(%ref, %ref, <MPI_SUM>) : memref<100xf32>, memref<100xf32> -> !mpi.retval
+ // CHECK-NEXT: mpi.allreduce(%arg0, %arg0, MPI_SUM, %comm) : memref<100xf32>, memref<100xf32>, !mpi.comm
+ mpi.allreduce(%ref, %ref, MPI_SUM, %comm) : memref<100xf32>, memref<100xf32>, !mpi.comm
+
// CHECK-NEXT: %7 = mpi.finalize : !mpi.retval
%rval = mpi.finalize : !mpi.retval
>From 5feb59a71fb6ec60aee5ca24297d8468b27652cb Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Ram=C3=ADrez?=
<sergio.sanchez.ramirez+git at bsc.es>
Date: Sat, 1 Feb 2025 20:52:39 +0100
Subject: [PATCH 02/10] Fix assembly format for `comm_size`, `comm_rank`
---
mlir/include/mlir/Dialect/MPI/IR/MPIOps.td | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
index 3c6f5a8ac0ea8..cae411599c381 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
@@ -73,7 +73,8 @@ def MPI_CommRankOp : MPI_Op<"comm_rank", []> {
I32 : $rank
);
- let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` type(results)";
+ let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` (type($comm) ^ `->`)?"
+ "type(results)";
}
//===----------------------------------------------------------------------===//
@@ -97,7 +98,8 @@ def MPI_CommSizeOp : MPI_Op<"comm_size", []> {
I32 : $size
);
- let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` type(results)";
+ let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` (type($comm) ^ `->`)?"
+ "type(results)";
}
//===----------------------------------------------------------------------===//
>From e49e44e88baf9e5f6156c508aefee7d7682e624c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Ram=C3=ADrez?=
<sergio.sanchez.ramirez+git at bsc.es>
Date: Sat, 1 Feb 2025 20:53:03 +0100
Subject: [PATCH 03/10] add more tests for `comm_size`, `comm_rank`
---
mlir/test/Dialect/MPI/ops.mlir | 22 ++++++++++++++++++++--
1 file changed, 20 insertions(+), 2 deletions(-)
diff --git a/mlir/test/Dialect/MPI/ops.mlir b/mlir/test/Dialect/MPI/ops.mlir
index f5bdb86be94c4..d7521353b34a1 100644
--- a/mlir/test/Dialect/MPI/ops.mlir
+++ b/mlir/test/Dialect/MPI/ops.mlir
@@ -6,14 +6,32 @@ func.func @mpi_test(%ref : memref<100xf32>) -> () {
// CHECK: %0 = mpi.init : !mpi.retval
%err = mpi.init : !mpi.retval
+ // CHECK-NEXT: %comm = mpi.comm_world : !mpi.comm
+ %comm = mpi.comm_world : !mpi.comm
+
+ // CHECK-NEXT: %rank = mpi.comm_rank : i32
+ %rank = mpi.comm_rank : i32
+
// CHECK-NEXT: %retval, %rank = mpi.comm_rank : !mpi.retval, i32
%retval, %rank = mpi.comm_rank : !mpi.retval, i32
+ // CHECK-NEXT: %retval, %rank = mpi.comm_rank : !mpi.comm -> i32
+ %rank = mpi.comm_rank(%comm) : !mpi.comm -> i32
+
+ // CHECK-NEXT: %retval, %rank = mpi.comm_rank : !mpi.comm -> !mpi.retval, i32
+ %retval, %rank = mpi.comm_rank(%comm) : !mpi.comm -> !mpi.retval, i32
+
+ // CHECK-NEXT: %size = mpi.comm_size : i32
+ %size = mpi.comm_size : i32
+
// CHECK-NEXT: %retval_0, %size = mpi.comm_size : !mpi.retval, i32
%retval_0, %size = mpi.comm_size : !mpi.retval, i32
- // CHECK-NEXT: %comm = mpi.comm_world : !mpi.comm
- %comm = mpi.comm_world : !mpi.comm
+ // CHECK-NEXT: %size = mpi.comm_size : !mpi.comm -> i32
+ %size = mpi.comm_size(%comm) : !mpi.comm -> i32
+
+ // CHECK-NEXT: %retval_0, %size = mpi.comm_size : !mpi.retval, i32
+ %retval_0, %size = mpi.comm_size(%comm) : !mpi.comm -> !mpi.retval, i32
// CHECK-NEXT: %new_comm, %retval3 = mpi.comm_split(%comm, %rank, %rank) : i32, !mpi.retval
%new_comm, %retval3 = mpi.comm_split(%comm, %rank, %rank) : mpi.comm, i32, i32
>From aeb54245b4a4ac1845b51fbc239fe08df4f038ea Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Ram=C3=ADrez?=
<sergio.sanchez.ramirez+git at bsc.es>
Date: Sat, 1 Feb 2025 22:19:45 +0100
Subject: [PATCH 04/10] fix some assembly formats
---
mlir/include/mlir/Dialect/MPI/IR/MPIOps.td | 13 +++++++------
1 file changed, 7 insertions(+), 6 deletions(-)
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
index cae411599c381..70868cda9952d 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
@@ -73,8 +73,8 @@ def MPI_CommRankOp : MPI_Op<"comm_rank", []> {
I32 : $rank
);
- let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` (type($comm) ^ `->`)?"
- "type(results)";
+ let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict (`:` type($comm) ^ `->`):"
+ "(`:`)? type(results)";
}
//===----------------------------------------------------------------------===//
@@ -98,8 +98,8 @@ def MPI_CommSizeOp : MPI_Op<"comm_size", []> {
I32 : $size
);
- let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict `:` (type($comm) ^ `->`)?"
- "type(results)";
+ let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict (`:` type($comm) ^ `->`):"
+ "(`:`)? type(results)";
}
//===----------------------------------------------------------------------===//
@@ -122,11 +122,12 @@ def MPI_CommSplit : MPI_Op<"comm_split", []> {
let arguments = (ins MPI_Comm : $comm, I32 : $color, I32 : $key);
let results = (
- outs MPI_Comm : $newcomm,
- Optional<MPI_Retval> : $retval
+ outs Optional<MPI_Retval> : $retval,
+ MPI_Comm : $newcomm
);
let assemblyFormat = "`(` $comm `,` $color `,` $key `)` attr-dict `:` "
+ "type($comm) `,` type($color) `,` type($key) `->` "
"type(results)";
}
>From d8808f76ba9eff52f9301986a181b8c0c9ef461e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Ram=C3=ADrez?=
<sergio.sanchez.ramirez+git at bsc.es>
Date: Sat, 1 Feb 2025 22:20:04 +0100
Subject: [PATCH 05/10] fix some tests
---
mlir/test/Dialect/MPI/ops.mlir | 18 ++++++++++++------
1 file changed, 12 insertions(+), 6 deletions(-)
diff --git a/mlir/test/Dialect/MPI/ops.mlir b/mlir/test/Dialect/MPI/ops.mlir
index d7521353b34a1..fad203ded1d06 100644
--- a/mlir/test/Dialect/MPI/ops.mlir
+++ b/mlir/test/Dialect/MPI/ops.mlir
@@ -33,8 +33,11 @@ func.func @mpi_test(%ref : memref<100xf32>) -> () {
// CHECK-NEXT: %retval_0, %size = mpi.comm_size : !mpi.retval, i32
%retval_0, %size = mpi.comm_size(%comm) : !mpi.comm -> !mpi.retval, i32
- // CHECK-NEXT: %new_comm, %retval3 = mpi.comm_split(%comm, %rank, %rank) : i32, !mpi.retval
- %new_comm, %retval3 = mpi.comm_split(%comm, %rank, %rank) : mpi.comm, i32, i32
+ // CHECK-NEXT: %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32 -> !mpi.comm
+ %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32
+
+ // CHECK-NEXT: %retval3, %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32 -> !mpi.retval, !mpi.comm
+ %retval3, %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32 -> !mpi.retval, !mpi.comm
// CHECK-NEXT: mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
mpi.send(%ref, %rank, %rank) : memref<100xf32>, i32, i32
@@ -78,14 +81,17 @@ func.func @mpi_test(%ref : memref<100xf32>) -> () {
// CHECK-NEXT: %3 = mpi.wait(%req_2) : !mpi.request -> !mpi.retval
%err6 = mpi.wait(%req2) : !mpi.request -> !mpi.retval
- // CHECK-NEXT: mpi.barrier : !mpi.retval
- mpi.barrier : !mpi.retval
+ // CHECK-NEXT: mpi.barrier
+ mpi.barrier
// CHECK-NEXT: %5 = mpi.barrier : !mpi.retval
%err7 = mpi.barrier : !mpi.retval
- // CHECK-NEXT: mpi.barrier(%comm) : !mpi.retval
- mpi.barrier(%comm) : !mpi.retval
+ // CHECK-NEXT: mpi.barrier(%comm)
+ mpi.barrier(%comm)
+
+ // CHECK-NEXT: %5 = mpi.barrier : !mpi.retval
+ %err7 = mpi.barrier : !mpi.retval
// CHECK-NEXT: mpi.allreduce(%arg0, %arg0, <MPI_SUM>) : memref<100xf32>, memref<100xf32>
mpi.allreduce(%ref, %ref, <MPI_SUM>) : memref<100xf32>, memref<100xf32>
>From 96970af4e849a39fc8f11e20dab2a0c3f4f64bae Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sergio=20S=C3=A1nchez=20Ram=C3=ADrez?=
<sergio.sanchez.ramirez+git at bsc.es>
Date: Sat, 1 Feb 2025 22:20:43 +0100
Subject: [PATCH 06/10] try fixing the assembly format of `barrier`
---
mlir/include/mlir/Dialect/MPI/IR/MPIOps.td | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
index 70868cda9952d..c76a5ee081552 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
@@ -328,7 +328,18 @@ def MPI_Barrier : MPI_Op<"barrier", []> {
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict (`:` type($retval) ^)?";
+ // TODO fix assembly format
+ // let assemblyFormat = "("
+ // "(attr-dict) ^"
+ // "(attr-dict `:` type($retval)) ^"
+ // "(`(` $comm `)` attr-dict `:` type($comm)) ^"
+ // "(`(` $comm `)` attr-dict `:` type($comm) `->` type($retval))"
+ // ")?";
+ let assemblyFormat = [{
+ (`(` $comm ^ `)`)? attr-dict
+ (`:` type($comm) ^ `->`):(`:`)?
+ type(results)
+ }];
}
//===----------------------------------------------------------------------===//
>From cb0d2c139530fb5b5345b3c5c0af9bc15b9fa7eb Mon Sep 17 00:00:00 2001
From: "Schlimbach, Frank" <frank.schlimbach at intel.com>
Date: Thu, 27 Mar 2025 17:14:15 +0100
Subject: [PATCH 07/10] making communicator mandatory, fixing dependent code
and tests
---
mlir/include/mlir/Dialect/MPI/IR/MPIOps.td | 45 +++----
mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp | 24 +++-
mlir/test/Conversion/MPIToLLVM/ops.mlir | 22 ++--
.../MeshToMPI/convert-mesh-to-mpi.mlir | 62 +++++----
mlir/test/Dialect/MPI/ops.mlir | 121 +++++++-----------
5 files changed, 129 insertions(+), 145 deletions(-)
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
index c76a5ee081552..6bc25054bf48a 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
@@ -73,8 +73,7 @@ def MPI_CommRankOp : MPI_Op<"comm_rank", []> {
I32 : $rank
);
- let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict (`:` type($comm) ^ `->`):"
- "(`:`)? type(results)";
+ let assemblyFormat = "`(` $comm `)` attr-dict `:` type(results)";
}
//===----------------------------------------------------------------------===//
@@ -91,15 +90,14 @@ def MPI_CommSizeOp : MPI_Op<"comm_size", []> {
to check for errors.
}];
- let arguments = (ins Optional<MPI_Comm> : $comm);
+ let arguments = (ins MPI_Comm : $comm);
let results = (
outs Optional<MPI_Retval> : $retval,
I32 : $size
);
- let assemblyFormat = "(`(` $comm ^ `)`)? attr-dict (`:` type($comm) ^ `->`):"
- "(`:`)? type(results)";
+ let assemblyFormat = "`(` $comm `)` attr-dict `:` type(results)";
}
//===----------------------------------------------------------------------===//
@@ -127,7 +125,7 @@ def MPI_CommSplit : MPI_Op<"comm_split", []> {
);
let assemblyFormat = "`(` $comm `,` $color `,` $key `)` attr-dict `:` "
- "type($comm) `,` type($color) `,` type($key) `->` "
+ "type($color) `,` type($key) `->` "
"type(results)";
}
@@ -153,12 +151,12 @@ def MPI_SendOp : MPI_Op<"send", []> {
ins AnyMemRef : $ref,
I32 : $tag,
I32 : $dest,
- Optional<MPI_Comm> : $comm
+ MPI_Comm : $comm
);
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "`(` $ref `,` $tag `,` $dest (`,` $comm ^)? `)` attr-dict `:` "
+ let assemblyFormat = "`(` $ref `,` $tag `,` $dest `,` $comm `)` attr-dict `:` "
"type($ref) `,` type($tag) `,` type($dest)"
"(`->` type($retval)^)?";
let hasCanonicalizer = 1;
@@ -187,7 +185,7 @@ def MPI_ISendOp : MPI_Op<"isend", []> {
ins AnyMemRef : $ref,
I32 : $tag,
I32 : $rank,
- Optional<MPI_Comm> : $comm
+ MPI_Comm : $comm
);
let results = (
@@ -195,9 +193,9 @@ def MPI_ISendOp : MPI_Op<"isend", []> {
MPI_Request : $req
);
- let assemblyFormat = "`(` $ref `,` $tag `,` $rank (`,` $comm ^)?`)` attr-dict "
+ let assemblyFormat = "`(` $ref `,` $tag `,` $rank `,` $comm`)` attr-dict "
"`:` type($ref) `,` type($tag) `,` type($rank) "
- "(`,` type($comm) ^)? `->` type(results)";
+ "`->` type(results)";
let hasCanonicalizer = 1;
}
@@ -225,14 +223,14 @@ def MPI_RecvOp : MPI_Op<"recv", []> {
let arguments = (
ins AnyMemRef : $ref,
I32 : $tag, I32 : $source,
- Optional<MPI_Comm> : $comm
+ MPI_Comm : $comm
);
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "`(` $ref `,` $tag `,` $source (`,` $comm ^)?`)` attr-dict"
+ let assemblyFormat = "`(` $ref `,` $tag `,` $source `,` $comm `)` attr-dict"
" `:` type($ref) `,` type($tag) `,` type($source) "
- "(`,` type($comm) ^)? (`->` type($retval)^)?";
+ "(`->` type($retval)^)?";
let hasCanonicalizer = 1;
}
@@ -259,7 +257,7 @@ def MPI_IRecvOp : MPI_Op<"irecv", []> {
ins AnyMemRef : $ref,
I32 : $tag,
I32 : $rank,
- Optional<MPI_Comm> : $comm
+ MPI_Comm : $comm
);
let results = (
@@ -267,9 +265,9 @@ def MPI_IRecvOp : MPI_Op<"irecv", []> {
MPI_Request : $req
);
- let assemblyFormat = "`(` $ref `,` $tag `,` $rank (`,` $comm ^)?`)` attr-dict "
+ let assemblyFormat = "`(` $ref `,` $tag `,` $rank `,` $comm`)` attr-dict "
"`:` type($ref) `,` type($tag) `,` type($rank)"
- "(`,` type($comm) ^)? `->` type(results)";
+ "`->` type(results)";
let hasCanonicalizer = 1;
}
@@ -298,14 +296,14 @@ def MPI_AllReduceOp : MPI_Op<"allreduce", []> {
ins AnyMemRef : $sendbuf,
AnyMemRef : $recvbuf,
MPI_OpClassAttr : $op,
- Optional<MPI_Comm> : $comm
+ MPI_Comm : $comm
);
let results = (outs Optional<MPI_Retval>:$retval);
- let assemblyFormat = "`(` $sendbuf `,` $recvbuf `,` $op (`,` $comm ^)?`)` "
+ let assemblyFormat = "`(` $sendbuf `,` $recvbuf `,` $op `,` $comm `)` "
"attr-dict `:` type($sendbuf) `,` type($recvbuf) "
- "(`,` type($comm) ^)? (`->` type($retval)^)?";
+ "(`->` type($retval)^)?";
}
//===----------------------------------------------------------------------===//
@@ -324,7 +322,7 @@ def MPI_Barrier : MPI_Op<"barrier", []> {
to check for errors.
}];
- let arguments = (ins Optional<MPI_Comm> : $comm);
+ let arguments = (ins MPI_Comm : $comm);
let results = (outs Optional<MPI_Retval>:$retval);
@@ -336,9 +334,8 @@ def MPI_Barrier : MPI_Op<"barrier", []> {
// "(`(` $comm `)` attr-dict `:` type($comm) `->` type($retval))"
// ")?";
let assemblyFormat = [{
- (`(` $comm ^ `)`)? attr-dict
- (`:` type($comm) ^ `->`):(`:`)?
- type(results)
+ `(` $comm `)` attr-dict
+ (`->` type($retval)^)?
}];
}
diff --git a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp
index 87c2938e4e52b..cafbf835de22f 100644
--- a/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp
+++ b/mlir/lib/Conversion/MeshToMPI/MeshToMPI.cpp
@@ -310,11 +310,16 @@ class ConvertProcessLinearIndexOp
}
// Otherwise call create mpi::CommRankOp
- auto rank = rewriter
- .create<mpi::CommRankOp>(
- loc, TypeRange{mpi::RetvalType::get(op->getContext()),
- rewriter.getI32Type()})
- .getRank();
+ auto ctx = op.getContext();
+ Value commWorld =
+ rewriter.create<mpi::CommWorldOp>(loc, mpi::CommType::get(ctx));
+ auto rank =
+ rewriter
+ .create<mpi::CommRankOp>(
+ loc,
+ TypeRange{mpi::RetvalType::get(ctx), rewriter.getI32Type()},
+ commWorld)
+ .getRank();
rewriter.replaceOpWithNewOp<arith::IndexCastOp>(op, rewriter.getIndexType(),
rank);
return success();
@@ -652,6 +657,9 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
auto upperSendOffset = rewriter.create<arith::SubIOp>(
loc, upperRecvOffset, toValue(haloSizes[currHaloDim * 2]));
+ Value commWorld = rewriter.create<mpi::CommWorldOp>(
+ loc, mpi::CommType::get(op->getContext()));
+
// Make sure we send/recv in a way that does not lead to a dead-lock.
// The current approach is by far not optimal, this should be at least
// be a red-black pattern or using MPI_sendrecv.
@@ -680,7 +688,8 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
auto subview = builder.create<memref::SubViewOp>(
loc, array, offsets, dimSizes, strides);
builder.create<memref::CopyOp>(loc, subview, buffer);
- builder.create<mpi::SendOp>(loc, TypeRange{}, buffer, tag, to);
+ builder.create<mpi::SendOp>(loc, TypeRange{}, buffer, tag, to,
+ commWorld);
builder.create<scf::YieldOp>(loc);
});
// if has neighbor: receive halo data into buffer and copy to array
@@ -688,7 +697,8 @@ struct ConvertUpdateHaloOp : public OpConversionPattern<UpdateHaloOp> {
loc, hasFrom, [&](OpBuilder &builder, Location loc) {
offsets[dim] = upperHalo ? OpFoldResult(upperRecvOffset)
: OpFoldResult(lowerRecvOffset);
- builder.create<mpi::RecvOp>(loc, TypeRange{}, buffer, tag, from);
+ builder.create<mpi::RecvOp>(loc, TypeRange{}, buffer, tag, from,
+ commWorld);
auto subview = builder.create<memref::SubViewOp>(
loc, array, offsets, dimSizes, strides);
builder.create<memref::CopyOp>(loc, buffer, subview);
diff --git a/mlir/test/Conversion/MPIToLLVM/ops.mlir b/mlir/test/Conversion/MPIToLLVM/ops.mlir
index 3c1b344efd50b..36d573cf54799 100644
--- a/mlir/test/Conversion/MPIToLLVM/ops.mlir
+++ b/mlir/test/Conversion/MPIToLLVM/ops.mlir
@@ -22,11 +22,12 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v7:%.*]] = llvm.call @MPI_Init([[v6]], [[v6]]) : (!llvm.ptr, !llvm.ptr) -> i32
%0 = mpi.init : !mpi.retval
+ %comm = mpi.comm_world : !mpi.comm
// CHECK: [[v8:%.*]] = llvm.mlir.constant(1140850688 : i32) : i32
// CHECK: [[v9:%.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: [[v10:%.*]] = llvm.alloca [[v9]] x i32 : (i32) -> !llvm.ptr
// CHECK: [[v11:%.*]] = llvm.call @MPI_Comm_rank([[v8]], [[v10]]) : (i32, !llvm.ptr) -> i32
- %retval, %rank = mpi.comm_rank : !mpi.retval, i32
+ %retval, %rank = mpi.comm_rank(%comm) : !mpi.retval, i32
// CHECK: [[v12:%.*]] = llvm.load [[v10]] : !llvm.ptr -> i32
// CHECK: [[v13:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -37,7 +38,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v18:%.*]] = llvm.mlir.constant(1275069450 : i32) : i32
// CHECK: [[v19:%.*]] = llvm.mlir.constant(1140850688 : i32) : i32
// CHECK: [[v20:%.*]] = llvm.call @MPI_Send([[v15]], [[v17]], [[v18]], [[v12]], [[v12]], [[v19]]) : (!llvm.ptr, i32, i32, i32, i32, i32) -> i32
- mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
+ mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32
// CHECK: [[v21:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v22:%.*]] = llvm.extractvalue [[v5]][2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -47,7 +48,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v26:%.*]] = llvm.mlir.constant(1275069450 : i32) : i32
// CHECK: [[v27:%.*]] = llvm.mlir.constant(1140850688 : i32) : i32
// CHECK: [[v28:%.*]] = llvm.call @MPI_Send([[v23]], [[v25]], [[v26]], [[v12]], [[v12]], [[v27]]) : (!llvm.ptr, i32, i32, i32, i32, i32) -> i32
- %1 = mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ %1 = mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
// CHECK: [[v29:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v30:%.*]] = llvm.extractvalue [[v5]][2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -59,7 +60,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v36:%.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: [[v37:%.*]] = llvm.inttoptr [[v36]] : i64 to !llvm.ptr
// CHECK: [[v38:%.*]] = llvm.call @MPI_Recv([[v31]], [[v33]], [[v34]], [[v12]], [[v12]], [[v35]], [[v37]]) : (!llvm.ptr, i32, i32, i32, i32, i32, !llvm.ptr) -> i32
- mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
+ mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32
// CHECK: [[v39:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v40:%.*]] = llvm.extractvalue [[v5]][2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -71,7 +72,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v46:%.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: [[v47:%.*]] = llvm.inttoptr [[v46]] : i64 to !llvm.ptr
// CHECK: [[v48:%.*]] = llvm.call @MPI_Recv([[v41]], [[v43]], [[v44]], [[v12]], [[v12]], [[v45]], [[v47]]) : (!llvm.ptr, i32, i32, i32, i32, i32, !llvm.ptr) -> i32
- %2 = mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ %2 = mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
// CHECK: [[v49:%.*]] = llvm.call @MPI_Finalize() : () -> i32
%3 = mpi.finalize : !mpi.retval
@@ -106,11 +107,12 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "OpenMPI"> } {
// CHECK: [[v7:%.*]] = llvm.call @MPI_Init([[v6]], [[v6]]) : (!llvm.ptr, !llvm.ptr) -> i32
%0 = mpi.init : !mpi.retval
+ %comm = mpi.comm_world : !mpi.comm
// CHECK: [[v8:%.*]] = llvm.mlir.addressof @ompi_mpi_comm_world : !llvm.ptr
// CHECK: [[v9:%.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: [[v10:%.*]] = llvm.alloca [[v9]] x i32 : (i32) -> !llvm.ptr
// CHECK: [[v11:%.*]] = llvm.call @MPI_Comm_rank([[v8]], [[v10]]) : (!llvm.ptr, !llvm.ptr) -> i32
- %retval, %rank = mpi.comm_rank : !mpi.retval, i32
+ %retval, %rank = mpi.comm_rank(%comm) : !mpi.retval, i32
// CHECK: [[v12:%.*]] = llvm.load [[v10]] : !llvm.ptr -> i32
// CHECK: [[v13:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -121,7 +123,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "OpenMPI"> } {
// CHECK: [[v18:%.*]] = llvm.mlir.addressof @ompi_mpi_float : !llvm.ptr
// CHECK: [[v19:%.*]] = llvm.mlir.addressof @ompi_mpi_comm_world : !llvm.ptr
// CHECK: [[v20:%.*]] = llvm.call @MPI_Send([[v15]], [[v17]], [[v18]], [[v12]], [[v12]], [[v19]]) : (!llvm.ptr, i32, !llvm.ptr, i32, i32, !llvm.ptr) -> i32
- mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
+ mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32
// CHECK: [[v21:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v22:%.*]] = llvm.extractvalue [[v5]][2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -131,7 +133,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "OpenMPI"> } {
// CHECK: [[v26:%.*]] = llvm.mlir.addressof @ompi_mpi_float : !llvm.ptr
// CHECK: [[v27:%.*]] = llvm.mlir.addressof @ompi_mpi_comm_world : !llvm.ptr
// CHECK: [[v28:%.*]] = llvm.call @MPI_Send([[v23]], [[v25]], [[v26]], [[v12]], [[v12]], [[v27]]) : (!llvm.ptr, i32, !llvm.ptr, i32, i32, !llvm.ptr) -> i32
- %1 = mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ %1 = mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
// CHECK: [[v29:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v30:%.*]] = llvm.extractvalue [[v5]][2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -143,7 +145,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "OpenMPI"> } {
// CHECK: [[v36:%.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: [[v37:%.*]] = llvm.inttoptr [[v36]] : i64 to !llvm.ptr
// CHECK: [[v38:%.*]] = llvm.call @MPI_Recv([[v31]], [[v33]], [[v34]], [[v12]], [[v12]], [[v35]], [[v37]]) : (!llvm.ptr, i32, !llvm.ptr, i32, i32, !llvm.ptr, !llvm.ptr) -> i32
- mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
+ mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32
// CHECK: [[v39:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v40:%.*]] = llvm.extractvalue [[v5]][2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -155,7 +157,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "OpenMPI"> } {
// CHECK: [[v46:%.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: [[v47:%.*]] = llvm.inttoptr [[v46]] : i64 to !llvm.ptr
// CHECK: [[v48:%.*]] = llvm.call @MPI_Recv([[v41]], [[v43]], [[v44]], [[v12]], [[v12]], [[v45]], [[v47]]) : (!llvm.ptr, i32, !llvm.ptr, i32, i32, !llvm.ptr, !llvm.ptr) -> i32
- %2 = mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ %2 = mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
// CHECK: [[v49:%.*]] = llvm.call @MPI_Finalize() : () -> i32
%3 = mpi.finalize : !mpi.retval
diff --git a/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir b/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir
index 4e60c6f0d4e44..23756bb66928d 100644
--- a/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir
+++ b/mlir/test/Conversion/MeshToMPI/convert-mesh-to-mpi.mlir
@@ -4,7 +4,7 @@
// CHECK: mesh.mesh @mesh0
mesh.mesh @mesh0(shape = 3x4x5)
func.func @process_multi_index() -> (index, index, index) {
- // CHECK: mpi.comm_rank : !mpi.retval, i32
+ // CHECK: mpi.comm_rank
// CHECK-DAG: %[[v4:.*]] = arith.remsi
// CHECK-DAG: %[[v0:.*]] = arith.remsi
// CHECK-DAG: %[[v1:.*]] = arith.remsi
@@ -15,7 +15,7 @@ func.func @process_multi_index() -> (index, index, index) {
// CHECK-LABEL: func @process_linear_index
func.func @process_linear_index() -> index {
- // CHECK: %[[RES:.*]], %[[rank:.*]] = mpi.comm_rank : !mpi.retval, i32
+ // CHECK: %[[RES:.*]], %[[rank:.*]] = mpi.comm_rank
// CHECK: %[[cast:.*]] = arith.index_cast %[[rank]] : i32 to index
%0 = mesh.process_linear_index on @mesh0 : index
// CHECK: return %[[cast]] : index
@@ -113,17 +113,17 @@ module attributes { mpi.dlti = #dlti.map<"MPI:comm_world_rank" = 1> } {
// CHECK: [[vc91_i32:%.*]] = arith.constant 91 : i32
// CHECK-NEXT: [[vc0_i32:%.*]] = arith.constant 0 : i32
// CHECK-NEXT: [[vc2_i32:%.*]] = arith.constant 2 : i32
+ // CHECK-NEXT: [[v0:%.*]] = mpi.comm_world : !mpi.comm
// CHECK-NEXT: [[valloc:%.*]] = memref.alloc() : memref<2x120x120xi8>
- // CHECK-NEXT: [[vsubview:%.*]] = memref.subview [[varg0]][118, 0, 0] [2, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<2x120x120xi8
- // CHECK-NEXT: memref.copy [[vsubview]], [[valloc]] : memref<2x120x120xi8
- // CHECK-SAME: to memref<2x120x120xi8>
- // CHECK-NEXT: mpi.send([[valloc]], [[vc91_i32]], [[vc2_i32]]) : memref<2x120x120xi8>, i32, i32
- // CHECK-NEXT: mpi.recv([[valloc]], [[vc91_i32]], [[vc0_i32]]) : memref<2x120x120xi8>, i32, i32
- // CHECK-NEXT: [[vsubview_0:%.*]] = memref.subview [[varg0]][0, 0, 0] [2, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<2x120x120xi8
- // CHECK-NEXT: memref.copy [[valloc]], [[vsubview_0]] : memref<2x120x120xi8> to memref<2x120x120xi8
+ // CHECK-NEXT: [[vsubview:%.*]] = memref.subview [[varg0]][118, 0, 0] [2, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<2x120x120xi8, strided<[14400, 120, 1], offset: 1699200>>
+ // CHECK-NEXT: memref.copy [[vsubview]], [[valloc]] : memref<2x120x120xi8, strided<[14400, 120, 1], offset: 1699200>> to memref<2x120x120xi8>
+ // CHECK-NEXT: mpi.send([[valloc]], [[vc91_i32]], [[vc2_i32]], [[v0]]) : memref<2x120x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc]], [[vc91_i32]], [[vc0_i32]], [[v0]]) : memref<2x120x120xi8>, i32, i32
+ // CHECK-NEXT: [[vsubview_0:%.*]] = memref.subview [[varg0]][0, 0, 0] [2, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<2x120x120xi8, strided<[14400, 120, 1]>>
+ // CHECK-NEXT: memref.copy [[valloc]], [[vsubview_0]] : memref<2x120x120xi8> to memref<2x120x120xi8, strided<[14400, 120, 1]>>
// CHECK-NEXT: memref.dealloc [[valloc]] : memref<2x120x120xi8>
%res = mesh.update_halo %arg0 on @mesh0 split_axes = [[0]] halo_sizes = [2, 0] : memref<120x120x120xi8>
- // CHECK: return [[res:%.*]] : memref<120x120x120xi8>
+ // CHECK: return [[varg0]] : memref<120x120x120xi8>
return %res : memref<120x120x120xi8>
}
}
@@ -140,41 +140,44 @@ module attributes { mpi.dlti = #dlti.map<"MPI:comm_world_rank" = 24> } {
// CHECK-NEXT: [[vc91_i32:%.*]] = arith.constant 91 : i32
// CHECK-NEXT: [[vc4_i32:%.*]] = arith.constant 4 : i32
// CHECK-NEXT: [[vc44_i32:%.*]] = arith.constant 44 : i32
+ // CHECK-NEXT: [[v0:%.*]] = mpi.comm_world : !mpi.comm
// CHECK-NEXT: [[valloc:%.*]] = memref.alloc() : memref<117x113x5xi8>
// CHECK-NEXT: [[vsubview:%.*]] = memref.subview [[varg0]][1, 3, 109] [117, 113, 5] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14869>>
// CHECK-NEXT: memref.copy [[vsubview]], [[valloc]] : memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14869>> to memref<117x113x5xi8>
- // CHECK-NEXT: mpi.send([[valloc]], [[vc91_i32]], [[vc44_i32]]) : memref<117x113x5xi8>, i32, i32
- // CHECK-NEXT: mpi.recv([[valloc]], [[vc91_i32]], [[vc4_i32]]) : memref<117x113x5xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc]], [[vc91_i32]], [[vc44_i32]], [[v0]]) : memref<117x113x5xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc]], [[vc91_i32]], [[vc4_i32]], [[v0]]) : memref<117x113x5xi8>, i32, i32
// CHECK-NEXT: [[vsubview_0:%.*]] = memref.subview [[varg0]][1, 3, 0] [117, 113, 5] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14760>>
// CHECK-NEXT: memref.copy [[valloc]], [[vsubview_0]] : memref<117x113x5xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14760>>
// CHECK-NEXT: memref.dealloc [[valloc]] : memref<117x113x5xi8>
// CHECK-NEXT: [[valloc_1:%.*]] = memref.alloc() : memref<117x113x6xi8>
// CHECK-NEXT: [[vsubview_2:%.*]] = memref.subview [[varg0]][1, 3, 5] [117, 113, 6] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14765>>
// CHECK-NEXT: memref.copy [[vsubview_2]], [[valloc_1]] : memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14765>> to memref<117x113x6xi8>
- // CHECK-NEXT: mpi.send([[valloc_1]], [[vc91_i32]], [[vc4_i32]]) : memref<117x113x6xi8>, i32, i32
- // CHECK-NEXT: mpi.recv([[valloc_1]], [[vc91_i32]], [[vc44_i32]]) : memref<117x113x6xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc_1]], [[vc91_i32]], [[vc4_i32]], [[v0]]) : memref<117x113x6xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc_1]], [[vc91_i32]], [[vc44_i32]], [[v0]]) : memref<117x113x6xi8>, i32, i32
// CHECK-NEXT: [[vsubview_3:%.*]] = memref.subview [[varg0]][1, 3, 114] [117, 113, 6] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14874>>
// CHECK-NEXT: memref.copy [[valloc_1]], [[vsubview_3]] : memref<117x113x6xi8> to memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14874>>
// CHECK-NEXT: memref.dealloc [[valloc_1]] : memref<117x113x6xi8>
+ // CHECK-NEXT: [[v1:%.*]] = mpi.comm_world : !mpi.comm
// CHECK-NEXT: [[valloc_4:%.*]] = memref.alloc() : memref<117x3x120xi8>
// CHECK-NEXT: [[vsubview_5:%.*]] = memref.subview [[varg0]][1, 113, 0] [117, 3, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<117x3x120xi8, strided<[14400, 120, 1], offset: 27960>>
// CHECK-NEXT: memref.copy [[vsubview_5]], [[valloc_4]] : memref<117x3x120xi8, strided<[14400, 120, 1], offset: 27960>> to memref<117x3x120xi8>
- // CHECK-NEXT: mpi.send([[valloc_4]], [[vc91_i32]], [[vc29_i32]]) : memref<117x3x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc_4]], [[vc91_i32]], [[vc29_i32]], [[v1]]) : memref<117x3x120xi8>, i32, i32
// CHECK-NEXT: memref.dealloc [[valloc_4]] : memref<117x3x120xi8>
// CHECK-NEXT: [[valloc_6:%.*]] = memref.alloc() : memref<117x4x120xi8>
- // CHECK-NEXT: mpi.recv([[valloc_6]], [[vc91_i32]], [[vc29_i32]]) : memref<117x4x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc_6]], [[vc91_i32]], [[vc29_i32]], [[v1]]) : memref<117x4x120xi8>, i32, i32
// CHECK-NEXT: [[vsubview_7:%.*]] = memref.subview [[varg0]][1, 116, 0] [117, 4, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<117x4x120xi8, strided<[14400, 120, 1], offset: 28320>>
// CHECK-NEXT: memref.copy [[valloc_6]], [[vsubview_7]] : memref<117x4x120xi8> to memref<117x4x120xi8, strided<[14400, 120, 1], offset: 28320>>
// CHECK-NEXT: memref.dealloc [[valloc_6]] : memref<117x4x120xi8>
+ // CHECK-NEXT: [[v2:%.*]] = mpi.comm_world : !mpi.comm
// CHECK-NEXT: [[valloc_8:%.*]] = memref.alloc() : memref<1x120x120xi8>
- // CHECK-NEXT: mpi.recv([[valloc_8]], [[vc91_i32]], [[vc23_i32]]) : memref<1x120x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc_8]], [[vc91_i32]], [[vc23_i32]], [[v2]]) : memref<1x120x120xi8>, i32, i32
// CHECK-NEXT: [[vsubview_9:%.*]] = memref.subview [[varg0]][0, 0, 0] [1, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<1x120x120xi8, strided<[14400, 120, 1]>>
// CHECK-NEXT: memref.copy [[valloc_8]], [[vsubview_9]] : memref<1x120x120xi8> to memref<1x120x120xi8, strided<[14400, 120, 1]>>
// CHECK-NEXT: memref.dealloc [[valloc_8]] : memref<1x120x120xi8>
// CHECK-NEXT: [[valloc_10:%.*]] = memref.alloc() : memref<2x120x120xi8>
// CHECK-NEXT: [[vsubview_11:%.*]] = memref.subview [[varg0]][1, 0, 0] [2, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<2x120x120xi8, strided<[14400, 120, 1], offset: 14400>>
// CHECK-NEXT: memref.copy [[vsubview_11]], [[valloc_10]] : memref<2x120x120xi8, strided<[14400, 120, 1], offset: 14400>> to memref<2x120x120xi8>
- // CHECK-NEXT: mpi.send([[valloc_10]], [[vc91_i32]], [[vc23_i32]]) : memref<2x120x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc_10]], [[vc91_i32]], [[vc23_i32]], [[v2]]) : memref<2x120x120xi8>, i32, i32
// CHECK-NEXT: memref.dealloc [[valloc_10]] : memref<2x120x120xi8>
%res = mesh.update_halo %arg0 on @mesh0 split_axes = [[2], [1], [0]] halo_sizes = [1, 2, 3, 4, 5, 6] : memref<120x120x120xi8>
// CHECK: return [[varg0]] : memref<120x120x120xi8>
@@ -191,45 +194,48 @@ module attributes { mpi.dlti = #dlti.map<"MPI:comm_world_rank" = 24> } {
// CHECK-NEXT: [[vc4_i32:%.*]] = arith.constant 4 : i32
// CHECK-NEXT: [[vc91_i32:%.*]] = arith.constant 91 : i32
// CHECK-NEXT: [[v0:%.*]] = bufferization.to_memref [[varg0]] : tensor<120x120x120xi8> to memref<120x120x120xi8>
+ // CHECK-NEXT: [[v1:%.*]] = mpi.comm_world : !mpi.comm
// CHECK-NEXT: [[valloc:%.*]] = memref.alloc() : memref<117x113x5xi8>
// CHECK-NEXT: [[vsubview:%.*]] = memref.subview [[v0]][1, 3, 109] [117, 113, 5] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14869>>
// CHECK-NEXT: memref.copy [[vsubview]], [[valloc]] : memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14869>> to memref<117x113x5xi8>
- // CHECK-NEXT: mpi.send([[valloc]], [[vc91_i32]], [[vc44_i32]]) : memref<117x113x5xi8>, i32, i32
- // CHECK-NEXT: mpi.recv([[valloc]], [[vc91_i32]], [[vc4_i32]]) : memref<117x113x5xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc]], [[vc91_i32]], [[vc44_i32]], [[v1]]) : memref<117x113x5xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc]], [[vc91_i32]], [[vc4_i32]], [[v1]]) : memref<117x113x5xi8>, i32, i32
// CHECK-NEXT: [[vsubview_0:%.*]] = memref.subview [[v0]][1, 3, 0] [117, 113, 5] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14760>>
// CHECK-NEXT: memref.copy [[valloc]], [[vsubview_0]] : memref<117x113x5xi8> to memref<117x113x5xi8, strided<[14400, 120, 1], offset: 14760>>
// CHECK-NEXT: memref.dealloc [[valloc]] : memref<117x113x5xi8>
// CHECK-NEXT: [[valloc_1:%.*]] = memref.alloc() : memref<117x113x6xi8>
// CHECK-NEXT: [[vsubview_2:%.*]] = memref.subview [[v0]][1, 3, 5] [117, 113, 6] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14765>>
// CHECK-NEXT: memref.copy [[vsubview_2]], [[valloc_1]] : memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14765>> to memref<117x113x6xi8>
- // CHECK-NEXT: mpi.send([[valloc_1]], [[vc91_i32]], [[vc4_i32]]) : memref<117x113x6xi8>, i32, i32
- // CHECK-NEXT: mpi.recv([[valloc_1]], [[vc91_i32]], [[vc44_i32]]) : memref<117x113x6xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc_1]], [[vc91_i32]], [[vc4_i32]], [[v1]]) : memref<117x113x6xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc_1]], [[vc91_i32]], [[vc44_i32]], [[v1]]) : memref<117x113x6xi8>, i32, i32
// CHECK-NEXT: [[vsubview_3:%.*]] = memref.subview [[v0]][1, 3, 114] [117, 113, 6] [1, 1, 1] : memref<120x120x120xi8> to memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14874>>
// CHECK-NEXT: memref.copy [[valloc_1]], [[vsubview_3]] : memref<117x113x6xi8> to memref<117x113x6xi8, strided<[14400, 120, 1], offset: 14874>>
// CHECK-NEXT: memref.dealloc [[valloc_1]] : memref<117x113x6xi8>
+ // CHECK-NEXT: [[v2:%.*]] = mpi.comm_world : !mpi.comm
// CHECK-NEXT: [[valloc_4:%.*]] = memref.alloc() : memref<117x3x120xi8>
// CHECK-NEXT: [[vsubview_5:%.*]] = memref.subview [[v0]][1, 113, 0] [117, 3, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<117x3x120xi8, strided<[14400, 120, 1], offset: 27960>>
// CHECK-NEXT: memref.copy [[vsubview_5]], [[valloc_4]] : memref<117x3x120xi8, strided<[14400, 120, 1], offset: 27960>> to memref<117x3x120xi8>
- // CHECK-NEXT: mpi.send([[valloc_4]], [[vc91_i32]], [[vc29_i32]]) : memref<117x3x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc_4]], [[vc91_i32]], [[vc29_i32]], [[v2]]) : memref<117x3x120xi8>, i32, i32
// CHECK-NEXT: memref.dealloc [[valloc_4]] : memref<117x3x120xi8>
// CHECK-NEXT: [[valloc_6:%.*]] = memref.alloc() : memref<117x4x120xi8>
- // CHECK-NEXT: mpi.recv([[valloc_6]], [[vc91_i32]], [[vc29_i32]]) : memref<117x4x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc_6]], [[vc91_i32]], [[vc29_i32]], [[v2]]) : memref<117x4x120xi8>, i32, i32
// CHECK-NEXT: [[vsubview_7:%.*]] = memref.subview [[v0]][1, 116, 0] [117, 4, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<117x4x120xi8, strided<[14400, 120, 1], offset: 28320>>
// CHECK-NEXT: memref.copy [[valloc_6]], [[vsubview_7]] : memref<117x4x120xi8> to memref<117x4x120xi8, strided<[14400, 120, 1], offset: 28320>>
// CHECK-NEXT: memref.dealloc [[valloc_6]] : memref<117x4x120xi8>
+ // CHECK-NEXT: [[v3:%.*]] = mpi.comm_world : !mpi.comm
// CHECK-NEXT: [[valloc_8:%.*]] = memref.alloc() : memref<1x120x120xi8>
- // CHECK-NEXT: mpi.recv([[valloc_8]], [[vc91_i32]], [[vc23_i32]]) : memref<1x120x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.recv([[valloc_8]], [[vc91_i32]], [[vc23_i32]], [[v3]]) : memref<1x120x120xi8>, i32, i32
// CHECK-NEXT: [[vsubview_9:%.*]] = memref.subview [[v0]][0, 0, 0] [1, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<1x120x120xi8, strided<[14400, 120, 1]>>
// CHECK-NEXT: memref.copy [[valloc_8]], [[vsubview_9]] : memref<1x120x120xi8> to memref<1x120x120xi8, strided<[14400, 120, 1]>>
// CHECK-NEXT: memref.dealloc [[valloc_8]] : memref<1x120x120xi8>
// CHECK-NEXT: [[valloc_10:%.*]] = memref.alloc() : memref<2x120x120xi8>
// CHECK-NEXT: [[vsubview_11:%.*]] = memref.subview [[v0]][1, 0, 0] [2, 120, 120] [1, 1, 1] : memref<120x120x120xi8> to memref<2x120x120xi8, strided<[14400, 120, 1], offset: 14400>>
// CHECK-NEXT: memref.copy [[vsubview_11]], [[valloc_10]] : memref<2x120x120xi8, strided<[14400, 120, 1], offset: 14400>> to memref<2x120x120xi8>
- // CHECK-NEXT: mpi.send([[valloc_10]], [[vc91_i32]], [[vc23_i32]]) : memref<2x120x120xi8>, i32, i32
+ // CHECK-NEXT: mpi.send([[valloc_10]], [[vc91_i32]], [[vc23_i32]], [[v3]]) : memref<2x120x120xi8>, i32, i32
// CHECK-NEXT: memref.dealloc [[valloc_10]] : memref<2x120x120xi8>
- // CHECK-NEXT: [[v1:%.*]] = bufferization.to_tensor [[v0]] restrict writable : memref<120x120x120xi8> to tensor<120x120x120xi8>
+ // CHECK-NEXT: [[v4:%.*]] = bufferization.to_tensor [[v0]] restrict writable : memref<120x120x120xi8> to tensor<120x120x120xi8>
%res = mesh.update_halo %arg0 on @mesh0 split_axes = [[2], [1], [0]] halo_sizes = [1, 2, 3, 4, 5, 6] : tensor<120x120x120xi8>
- // CHECK: return [[v1]] : tensor<120x120x120xi8>
+ // CHECK-NEXT: return [[v4]] : tensor<120x120x120xi8>
return %res : tensor<120x120x120xi8>
}
}
diff --git a/mlir/test/Dialect/MPI/ops.mlir b/mlir/test/Dialect/MPI/ops.mlir
index fad203ded1d06..272e550e649cf 100644
--- a/mlir/test/Dialect/MPI/ops.mlir
+++ b/mlir/test/Dialect/MPI/ops.mlir
@@ -1,114 +1,83 @@
// RUN: mlir-opt %s | mlir-opt | FileCheck %s
+// CHECK-LABEL: func.func @mpi_test(
+// CHECK-SAME: [[varg0:%.*]]: memref<100xf32>) {
func.func @mpi_test(%ref : memref<100xf32>) -> () {
// Note: the !mpi.retval result is optional on all operations except mpi.error_class
- // CHECK: %0 = mpi.init : !mpi.retval
+ // CHECK-NEXT: [[v0:%.*]] = mpi.init : !mpi.retval
%err = mpi.init : !mpi.retval
- // CHECK-NEXT: %comm = mpi.comm_world : !mpi.comm
+ // CHECK-NEXT: [[v1:%.*]] = mpi.comm_world : !mpi.comm
%comm = mpi.comm_world : !mpi.comm
- // CHECK-NEXT: %rank = mpi.comm_rank : i32
- %rank = mpi.comm_rank : i32
+ // CHECK-NEXT: [[vrank:%.*]] = mpi.comm_rank([[v1]]) : i32
+ %rank = mpi.comm_rank(%comm) : i32
- // CHECK-NEXT: %retval, %rank = mpi.comm_rank : !mpi.retval, i32
- %retval, %rank = mpi.comm_rank : !mpi.retval, i32
+ // CHECK-NEXT: [[vretval:%.*]], [[vrank_0:%.*]] = mpi.comm_rank([[v1]]) : !mpi.retval, i32
+ %retval, %rank_1 = mpi.comm_rank(%comm) : !mpi.retval, i32
- // CHECK-NEXT: %retval, %rank = mpi.comm_rank : !mpi.comm -> i32
- %rank = mpi.comm_rank(%comm) : !mpi.comm -> i32
+ // CHECK-NEXT: [[vsize:%.*]] = mpi.comm_size([[v1]]) : i32
+ %size = mpi.comm_size(%comm) : i32
- // CHECK-NEXT: %retval, %rank = mpi.comm_rank : !mpi.comm -> !mpi.retval, i32
- %retval, %rank = mpi.comm_rank(%comm) : !mpi.comm -> !mpi.retval, i32
+ // CHECK-NEXT: [[vretval_1:%.*]], [[vsize_2:%.*]] = mpi.comm_size([[v1]]) : !mpi.retval, i32
+ %retval_0, %size_1 = mpi.comm_size(%comm) : !mpi.retval, i32
- // CHECK-NEXT: %size = mpi.comm_size : i32
- %size = mpi.comm_size : i32
+ // CHECK-NEXT: [[vnewcomm:%.*]] = mpi.comm_split([[v1]], [[vrank]], [[vrank]]) : i32, i32 -> !mpi.comm
+ %new_comm = mpi.comm_split(%comm, %rank, %rank) : i32, i32 -> !mpi.comm
- // CHECK-NEXT: %retval_0, %size = mpi.comm_size : !mpi.retval, i32
- %retval_0, %size = mpi.comm_size : !mpi.retval, i32
+ // CHECK-NEXT: [[vretval_3:%.*]], [[vnewcomm_4:%.*]] = mpi.comm_split([[v1]], [[vrank]], [[vrank]]) : i32, i32 -> !mpi.retval, !mpi.comm
+ %retval_1, %new_comm_1 = mpi.comm_split(%comm, %rank, %rank) : i32, i32 -> !mpi.retval, !mpi.comm
- // CHECK-NEXT: %size = mpi.comm_size : !mpi.comm -> i32
- %size = mpi.comm_size(%comm) : !mpi.comm -> i32
+ // CHECK-NEXT: mpi.send([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32
+ mpi.send(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32
- // CHECK-NEXT: %retval_0, %size = mpi.comm_size : !mpi.retval, i32
- %retval_0, %size = mpi.comm_size(%comm) : !mpi.comm -> !mpi.retval, i32
+ // CHECK-NEXT: [[v2:%.*]] = mpi.send([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32 -> !mpi.retval
+ %retval_2 = mpi.send(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
- // CHECK-NEXT: %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32 -> !mpi.comm
- %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32
+ // CHECK-NEXT: mpi.recv([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32
+ mpi.recv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32
- // CHECK-NEXT: %retval3, %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32 -> !mpi.retval, !mpi.comm
- %retval3, %new_comm = mpi.comm_split(%comm, %rank, %rank) : !mpi.comm, i32, i32 -> !mpi.retval, !mpi.comm
+ // CHECK-NEXT: [[v3:%.*]] = mpi.recv([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32 -> !mpi.retval
+ %retval_3 = mpi.recv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
- // CHECK-NEXT: mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
- mpi.send(%ref, %rank, %rank) : memref<100xf32>, i32, i32
+ // CHECK-NEXT: [[vretval_5:%.*]], [[vreq:%.*]] = mpi.isend([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
+ %err4, %req2 = mpi.isend(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
- // CHECK-NEXT: %1 = mpi.send(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
- %err2 = mpi.send(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ // CHECK-NEXT: [[vreq_6:%.*]] = mpi.isend([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32 -> !mpi.request
+ %req1 = mpi.isend(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.request
- // CHECK-NEXT: mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
- mpi.send(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
+ // CHECK-NEXT: [[vreq_7:%.*]] = mpi.irecv([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32 -> !mpi.request
+ %req3 = mpi.irecv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.request
- // CHECK-NEXT: mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32
- mpi.recv(%ref, %rank, %rank) : memref<100xf32>, i32, i32
+ // CHECK-NEXT: [[vretval_8:%.*]], [[vreq_9:%.*]] = mpi.irecv([[varg0]], [[vrank]], [[vrank]], [[v1]]) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
+ %err5, %req4 = mpi.irecv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
- // CHECK-NEXT: %2 = mpi.recv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
- %err3 = mpi.recv(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval
+ // CHECK-NEXT: mpi.wait([[vreq_9]]) : !mpi.request
+ mpi.wait(%req4) : !mpi.request
- // CHECK-NEXT: mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
- mpi.recv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm
-
- // CHECK-NEXT: %req = mpi.isend(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
- %req = mpi.isend(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
-
- // CHECK-NEXT: %retval_1, %req_2 = mpi.isend(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
- %err4, %req2 = mpi.isend(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
-
- // CHECK-NEXT: %3 = mpi.isend(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, mpi.comm -> !mpi.request
- %req1 = mpi.isend(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm -> !mpi.request
-
- // CHECK-NEXT: %req_3 = mpi.irecv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
- %req3 = mpi.irecv(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.request
-
- // CHECK-NEXT: %retval_4, %req_5 = mpi.irecv(%arg0, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
- %err5, %req4 = mpi.irecv(%ref, %rank, %rank) : memref<100xf32>, i32, i32 -> !mpi.retval, !mpi.request
-
- // CHECK-NEXT: %6 = mpi.irecv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32, mpi.comm -> mpi.request
- %req3 = mpi.irecv(%ref, %rank, %rank, %comm) : memref<100xf32>, i32, i32, !mpi.comm -> !mpi.request
-
- // CHECK-NEXT: mpi.wait(%req) : !mpi.request
- mpi.wait(%req) : !mpi.request
-
- // CHECK-NEXT: %3 = mpi.wait(%req_2) : !mpi.request -> !mpi.retval
+ // CHECK-NEXT: [[v4:%.*]] = mpi.wait([[vreq]]) : !mpi.request -> !mpi.retval
%err6 = mpi.wait(%req2) : !mpi.request -> !mpi.retval
- // CHECK-NEXT: mpi.barrier
- mpi.barrier
-
- // CHECK-NEXT: %5 = mpi.barrier : !mpi.retval
- %err7 = mpi.barrier : !mpi.retval
-
- // CHECK-NEXT: mpi.barrier(%comm)
+ // CHECK-NEXT: mpi.barrier([[v1]])
mpi.barrier(%comm)
- // CHECK-NEXT: %5 = mpi.barrier : !mpi.retval
- %err7 = mpi.barrier : !mpi.retval
-
- // CHECK-NEXT: mpi.allreduce(%arg0, %arg0, <MPI_SUM>) : memref<100xf32>, memref<100xf32>
- mpi.allreduce(%ref, %ref, <MPI_SUM>) : memref<100xf32>, memref<100xf32>
+ // CHECK-NEXT: [[v5:%.*]] = mpi.barrier([[v1]]) -> !mpi.retval
+ %err7 = mpi.barrier(%comm) -> !mpi.retval
- // CHECK-NEXT: mpi.allreduce(%arg0, %arg0, <MPI_SUM>) : memref<100xf32>, memref<100xf32> -> !mpi.retval
- %err8 = mpi.allreduce(%ref, %ref, <MPI_SUM>) : memref<100xf32>, memref<100xf32> -> !mpi.retval
+ // CHECK-NEXT: [[v6:%.*]] = mpi.allreduce([[varg0]], [[varg0]], <MPI_SUM>, [[v1]]) : memref<100xf32>, memref<100xf32> -> !mpi.retval
+ %err8 = mpi.allreduce(%ref, %ref, <MPI_SUM>, %comm) : memref<100xf32>, memref<100xf32> -> !mpi.retval
- // CHECK-NEXT: mpi.allreduce(%arg0, %arg0, MPI_SUM, %comm) : memref<100xf32>, memref<100xf32>, !mpi.comm
- mpi.allreduce(%ref, %ref, MPI_SUM, %comm) : memref<100xf32>, memref<100xf32>, !mpi.comm
+ // CHECK-NEXT: mpi.allreduce([[varg0]], [[varg0]], <MPI_SUM>, [[v1]]) : memref<100xf32>, memref<100xf32>
+ mpi.allreduce(%ref, %ref, <MPI_SUM>, %comm) : memref<100xf32>, memref<100xf32>
- // CHECK-NEXT: %7 = mpi.finalize : !mpi.retval
+ // CHECK-NEXT: [[v7:%.*]] = mpi.finalize : !mpi.retval
%rval = mpi.finalize : !mpi.retval
- // CHECK-NEXT: %8 = mpi.retval_check %retval = <MPI_SUCCESS> : i1
+ // CHECK-NEXT: [[v8:%.*]] = mpi.retval_check [[vretval:%.*]] = <MPI_SUCCESS> : i1
%res = mpi.retval_check %retval = <MPI_SUCCESS> : i1
- // CHECK-NEXT: %9 = mpi.error_class %0 : !mpi.retval
+ // CHECK-NEXT: [[v9:%.*]] = mpi.error_class [[v0]] : !mpi.retval
%errclass = mpi.error_class %err : !mpi.retval
// CHECK-NEXT: return
>From 1fc03105810ab5cc23a54b5968a9a391dc8d1cf7 Mon Sep 17 00:00:00 2001
From: "Schlimbach, Frank" <frank.schlimbach at intel.com>
Date: Thu, 27 Mar 2025 18:07:10 +0100
Subject: [PATCH 08/10] lowering mpi.commworld and tests, works for MPICH, not
 OpenMPI
---
mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp | 48 +++++++++++++++------
mlir/test/Conversion/MPIToLLVM/ops.mlir | 12 ++----
2 files changed, 40 insertions(+), 20 deletions(-)
diff --git a/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp b/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
index d91f9512ccb8f..be8f5989740e3 100644
--- a/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
+++ b/mlir/lib/Conversion/MPIToLLVM/MPIToLLVM.cpp
@@ -295,6 +295,26 @@ struct FinalizeOpLowering : public ConvertOpToLLVMPattern<mpi::FinalizeOp> {
}
};
+//===----------------------------------------------------------------------===//
+// CommWorldOpLowering
+//===----------------------------------------------------------------------===//
+
+struct CommWorldOpLowering : public ConvertOpToLLVMPattern<mpi::CommWorldOp> {
+ using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;
+
+ LogicalResult
+ matchAndRewrite(mpi::CommWorldOp op, OpAdaptor adaptor,
+ ConversionPatternRewriter &rewriter) const override {
+ // grab a reference to the global module op:
+ auto moduleOp = op->getParentOfType<ModuleOp>();
+ auto mpiTraits = MPIImplTraits::get(moduleOp);
+ // get MPI_COMM_WORLD
+ rewriter.replaceOp(op, mpiTraits->getCommWorld(op.getLoc(), rewriter));
+
+ return success();
+ }
+};
+
//===----------------------------------------------------------------------===//
// CommRankOpLowering
//===----------------------------------------------------------------------===//
@@ -317,12 +337,12 @@ struct CommRankOpLowering : public ConvertOpToLLVMPattern<mpi::CommRankOp> {
auto moduleOp = op->getParentOfType<ModuleOp>();
auto mpiTraits = MPIImplTraits::get(moduleOp);
- // get MPI_COMM_WORLD
- Value commWorld = mpiTraits->getCommWorld(loc, rewriter);
+ // get communicator
+ Value comm = adaptor.getComm();
// LLVM Function type representing `i32 MPI_Comm_rank(ptr, ptr)`
auto rankFuncType =
- LLVM::LLVMFunctionType::get(i32, {commWorld.getType(), ptrType});
+ LLVM::LLVMFunctionType::get(i32, {comm.getType(), ptrType});
// get or create function declaration:
LLVM::LLVMFuncOp initDecl = getOrDefineFunction(
moduleOp, loc, rewriter, "MPI_Comm_rank", rankFuncType);
@@ -331,7 +351,7 @@ struct CommRankOpLowering : public ConvertOpToLLVMPattern<mpi::CommRankOp> {
auto one = rewriter.create<LLVM::ConstantOp>(loc, i32, 1);
auto rankptr = rewriter.create<LLVM::AllocaOp>(loc, ptrType, i32, one);
auto callOp = rewriter.create<LLVM::CallOp>(
- loc, initDecl, ValueRange{commWorld, rankptr.getRes()});
+ loc, initDecl, ValueRange{comm, rankptr.getRes()});
// load the rank into a register
auto loadedRank =
@@ -386,12 +406,12 @@ struct SendOpLowering : public ConvertOpToLLVMPattern<mpi::SendOp> {
size = rewriter.create<LLVM::TruncOp>(loc, i32, size);
auto mpiTraits = MPIImplTraits::get(moduleOp);
Value dataType = mpiTraits->getDataType(loc, rewriter, elemType);
- Value commWorld = mpiTraits->getCommWorld(loc, rewriter);
+ Value comm = adaptor.getComm();
// LLVM Function type representing `i32 MPI_send(data, count, datatype, dst,
// tag, comm)`
auto funcType = LLVM::LLVMFunctionType::get(
- i32, {ptrType, i32, dataType.getType(), i32, i32, commWorld.getType()});
+ i32, {ptrType, i32, dataType.getType(), i32, i32, comm.getType()});
// get or create function declaration:
LLVM::LLVMFuncOp funcDecl =
getOrDefineFunction(moduleOp, loc, rewriter, "MPI_Send", funcType);
@@ -400,7 +420,7 @@ struct SendOpLowering : public ConvertOpToLLVMPattern<mpi::SendOp> {
auto funcCall = rewriter.create<LLVM::CallOp>(
loc, funcDecl,
ValueRange{dataPtr, size, dataType, adaptor.getDest(), adaptor.getTag(),
- commWorld});
+ comm});
if (op.getRetval())
rewriter.replaceOp(op, funcCall.getResult());
else
@@ -445,7 +465,7 @@ struct RecvOpLowering : public ConvertOpToLLVMPattern<mpi::RecvOp> {
size = rewriter.create<LLVM::TruncOp>(loc, i32, size);
auto mpiTraits = MPIImplTraits::get(moduleOp);
Value dataType = mpiTraits->getDataType(loc, rewriter, elemType);
- Value commWorld = mpiTraits->getCommWorld(loc, rewriter);
+ Value comm = adaptor.getComm();
Value statusIgnore = rewriter.create<LLVM::ConstantOp>(
loc, i64, mpiTraits->getStatusIgnore());
statusIgnore =
@@ -455,7 +475,7 @@ struct RecvOpLowering : public ConvertOpToLLVMPattern<mpi::RecvOp> {
// tag, comm)`
auto funcType =
LLVM::LLVMFunctionType::get(i32, {ptrType, i32, dataType.getType(), i32,
- i32, commWorld.getType(), ptrType});
+ i32, comm.getType(), ptrType});
// get or create function declaration:
LLVM::LLVMFuncOp funcDecl =
getOrDefineFunction(moduleOp, loc, rewriter, "MPI_Recv", funcType);
@@ -464,7 +484,7 @@ struct RecvOpLowering : public ConvertOpToLLVMPattern<mpi::RecvOp> {
auto funcCall = rewriter.create<LLVM::CallOp>(
loc, funcDecl,
ValueRange{dataPtr, size, dataType, adaptor.getSource(),
- adaptor.getTag(), commWorld, statusIgnore});
+ adaptor.getTag(), comm, statusIgnore});
if (op.getRetval())
rewriter.replaceOp(op, funcCall.getResult());
else
@@ -497,8 +517,12 @@ struct FuncToLLVMDialectInterface : public ConvertToLLVMPatternInterface {
void mpi::populateMPIToLLVMConversionPatterns(LLVMTypeConverter &converter,
RewritePatternSet &patterns) {
- patterns.add<CommRankOpLowering, FinalizeOpLowering, InitOpLowering,
- SendOpLowering, RecvOpLowering>(converter);
+ // FIXME: Need DLTI info to get the MPI implementation to know the
+ // Communicator type
+ Type commType = IntegerType::get(&converter.getContext(), 32);
+ converter.addConversion([&](mpi::CommType type) { return commType; });
+ patterns.add<CommRankOpLowering, CommWorldOpLowering, FinalizeOpLowering,
+ InitOpLowering, SendOpLowering, RecvOpLowering>(converter);
}
void mpi::registerConvertMPIToLLVMInterface(DialectRegistry ®istry) {
diff --git a/mlir/test/Conversion/MPIToLLVM/ops.mlir b/mlir/test/Conversion/MPIToLLVM/ops.mlir
index 36d573cf54799..801faccdba03d 100644
--- a/mlir/test/Conversion/MPIToLLVM/ops.mlir
+++ b/mlir/test/Conversion/MPIToLLVM/ops.mlir
@@ -36,8 +36,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v16:%.*]] = llvm.extractvalue [[v5]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v17:%.*]] = llvm.trunc [[v16]] : i64 to i32
// CHECK: [[v18:%.*]] = llvm.mlir.constant(1275069450 : i32) : i32
- // CHECK: [[v19:%.*]] = llvm.mlir.constant(1140850688 : i32) : i32
- // CHECK: [[v20:%.*]] = llvm.call @MPI_Send([[v15]], [[v17]], [[v18]], [[v12]], [[v12]], [[v19]]) : (!llvm.ptr, i32, i32, i32, i32, i32) -> i32
+ // CHECK: [[v20:%.*]] = llvm.call @MPI_Send([[v15]], [[v17]], [[v18]], [[v12]], [[v12]], [[v8]]) : (!llvm.ptr, i32, i32, i32, i32, i32) -> i32
mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32
// CHECK: [[v21:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -46,8 +45,7 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v24:%.*]] = llvm.extractvalue [[v5]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v25:%.*]] = llvm.trunc [[v24]] : i64 to i32
// CHECK: [[v26:%.*]] = llvm.mlir.constant(1275069450 : i32) : i32
- // CHECK: [[v27:%.*]] = llvm.mlir.constant(1140850688 : i32) : i32
- // CHECK: [[v28:%.*]] = llvm.call @MPI_Send([[v23]], [[v25]], [[v26]], [[v12]], [[v12]], [[v27]]) : (!llvm.ptr, i32, i32, i32, i32, i32) -> i32
+ // CHECK: [[v28:%.*]] = llvm.call @MPI_Send([[v23]], [[v25]], [[v26]], [[v12]], [[v12]], [[v8]]) : (!llvm.ptr, i32, i32, i32, i32, i32) -> i32
%1 = mpi.send(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
// CHECK: [[v29:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -56,10 +54,9 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v32:%.*]] = llvm.extractvalue [[v5]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v33:%.*]] = llvm.trunc [[v32]] : i64 to i32
// CHECK: [[v34:%.*]] = llvm.mlir.constant(1275069450 : i32) : i32
- // CHECK: [[v35:%.*]] = llvm.mlir.constant(1140850688 : i32) : i32
// CHECK: [[v36:%.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: [[v37:%.*]] = llvm.inttoptr [[v36]] : i64 to !llvm.ptr
- // CHECK: [[v38:%.*]] = llvm.call @MPI_Recv([[v31]], [[v33]], [[v34]], [[v12]], [[v12]], [[v35]], [[v37]]) : (!llvm.ptr, i32, i32, i32, i32, i32, !llvm.ptr) -> i32
+ // CHECK: [[v38:%.*]] = llvm.call @MPI_Recv([[v31]], [[v33]], [[v34]], [[v12]], [[v12]], [[v8]], [[v37]]) : (!llvm.ptr, i32, i32, i32, i32, i32, !llvm.ptr) -> i32
mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32
// CHECK: [[v39:%.*]] = llvm.extractvalue [[v5]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -68,10 +65,9 @@ module attributes { mpi.dlti = #dlti.map<"MPI:Implementation" = "MPICH"> } {
// CHECK: [[v42:%.*]] = llvm.extractvalue [[v5]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[v43:%.*]] = llvm.trunc [[v42]] : i64 to i32
// CHECK: [[v44:%.*]] = llvm.mlir.constant(1275069450 : i32) : i32
- // CHECK: [[v45:%.*]] = llvm.mlir.constant(1140850688 : i32) : i32
// CHECK: [[v46:%.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: [[v47:%.*]] = llvm.inttoptr [[v46]] : i64 to !llvm.ptr
- // CHECK: [[v48:%.*]] = llvm.call @MPI_Recv([[v41]], [[v43]], [[v44]], [[v12]], [[v12]], [[v45]], [[v47]]) : (!llvm.ptr, i32, i32, i32, i32, i32, !llvm.ptr) -> i32
+ // CHECK: [[v48:%.*]] = llvm.call @MPI_Recv([[v41]], [[v43]], [[v44]], [[v12]], [[v12]], [[v8]], [[v47]]) : (!llvm.ptr, i32, i32, i32, i32, i32, !llvm.ptr) -> i32
%2 = mpi.recv(%arg0, %rank, %rank, %comm) : memref<100xf32>, i32, i32 -> !mpi.retval
// CHECK: [[v49:%.*]] = llvm.call @MPI_Finalize() : () -> i32
>From 3995722c9384d8398da5723ecf4bc2d62ce0b3e3 Mon Sep 17 00:00:00 2001
From: Frank Schlimbach <frank.schlimbach at intel.com>
Date: Fri, 28 Mar 2025 09:09:52 +0100
Subject: [PATCH 09/10] Fixing oversights and comments (review)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Sergio Sánchez Ramírez <15837247+mofeing at users.noreply.github.com>
---
mlir/include/mlir/Dialect/MPI/IR/MPIOps.td | 4 +---
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
index 6bc25054bf48a..3c38f82bca291 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
@@ -60,13 +60,11 @@ def MPI_CommRankOp : MPI_Op<"comm_rank", []> {
let summary = "Get the current rank, equivalent to "
"`MPI_Comm_rank(comm, &rank)`";
let description = [{
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
-
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
- let arguments = (ins Optional<MPI_Comm> : $comm);
+ let arguments = (ins MPI_Comm : $comm);
let results = (
outs Optional<MPI_Retval> : $retval,
>From 46a19b8f234398e1bccf30269e2d83880ccbbe17 Mon Sep 17 00:00:00 2001
From: Frank Schlimbach <frank.schlimbach at intel.com>
Date: Fri, 28 Mar 2025 09:12:54 +0100
Subject: [PATCH 10/10] Apply suggestions from code review
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Co-authored-by: Sergio Sánchez Ramírez <15837247+mofeing at users.noreply.github.com>
---
mlir/include/mlir/Dialect/MPI/IR/MPIOps.td | 15 +--------------
1 file changed, 1 insertion(+), 14 deletions(-)
diff --git a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
index 3c38f82bca291..67e51bfa197ad 100644
--- a/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
+++ b/mlir/include/mlir/Dialect/MPI/IR/MPIOps.td
@@ -82,8 +82,6 @@ def MPI_CommSizeOp : MPI_Op<"comm_size", []> {
let summary = "Get the size of the group associated to the communicator, "
"equivalent to `MPI_Comm_size(comm, &size)`";
let description = [{
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
-
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
@@ -139,8 +137,6 @@ def MPI_SendOp : MPI_Op<"send", []> {
`dest`. The `tag` value and communicator enables the library to determine
the matching of multiple sends and receives between the same ranks.
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
-
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
@@ -173,8 +169,6 @@ def MPI_ISendOp : MPI_Op<"isend", []> {
determine the matching of multiple sends and receives between the same
ranks.
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
-
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
@@ -202,7 +196,7 @@ def MPI_ISendOp : MPI_Op<"isend", []> {
//===----------------------------------------------------------------------===//
def MPI_RecvOp : MPI_Op<"recv", []> {
- let summary = "Equivalent to `MPI_Recv(ptr, size, dtype, dest, tag, "
+ let summary = "Equivalent to `MPI_Recv(ptr, size, dtype, source, tag, "
"comm, MPI_STATUS_IGNORE)`";
let description = [{
MPI_Recv performs a blocking receive of `size` elements of type `dtype`
@@ -210,7 +204,6 @@ def MPI_RecvOp : MPI_Op<"recv", []> {
determine the matching of multiple sends and receives between the same
ranks.
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
The MPI_Status is set to `MPI_STATUS_IGNORE`, as the status object
is not yet ported to MLIR.
@@ -245,8 +238,6 @@ def MPI_IRecvOp : MPI_Op<"irecv", []> {
determine the matching of multiple sends and receives between the same
ranks.
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
-
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
@@ -284,8 +275,6 @@ def MPI_AllReduceOp : MPI_Op<"allreduce", []> {
Currently only the `MPI_Op` predefined in the standard (e.g. `MPI_SUM`) are
supported.
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
-
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
@@ -314,8 +303,6 @@ def MPI_Barrier : MPI_Op<"barrier", []> {
MPI_Barrier blocks execution until all processes in the communicator have
reached this routine.
- If communicator is not specified, `MPI_COMM_WORLD` is used by default.
-
This operation can optionally return an `!mpi.retval` value that can be used
to check for errors.
}];
More information about the Mlir-commits
mailing list