[flang-commits] [mlir] [flang] [mlir] Ptr dialect (PR #73057)
Fabian Mora via flang-commits
flang-commits at lists.llvm.org
Wed Jan 3 09:13:54 PST 2024
https://github.com/fabianmcg updated https://github.com/llvm/llvm-project/pull/73057
From 3612436d2e98bb540b025e3080cc310f7192df4a Mon Sep 17 00:00:00 2001
From: Fabian Mora <fmora.dev at gmail.com>
Date: Wed, 22 Nov 2023 00:27:44 +0000
Subject: [PATCH 1/3] [mlir] Ptr dialect
This patch introduces the Ptr dialect, a dialect that models pointer operations
and is motivated by the goal of modularizing the LLVM dialect.
More specifically, this patch introduces:
- The pointer dialect and type.
- The `MemorySpaceAttrInterface` interface, an interface to conceptualize memory models, giving proper semantic meaning to the Ptr dialect ops.
- The `ptr::LoadOp` operation, an operation to load data from memory, with the semantics defined by `MemorySpaceAttrInterface` and translatable to LLVM IR.
- The `SharedDialectTypeInterface` interface, an interface to delegate printing and parsing to a different dialect.
- `LLVM::AddressSpaceAttr`, an attribute to model LLVM memory semantics.
- The replacement of `LLVMPointerType` with `ptr::PtrType`.
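
A minimal end-to-end sketch of the surface introduced here, assuming the
assembly formats defined in `PtrOps.td` below (the `custom<PtrType>` and
`custom<IntType>` directives are assumed to elide the default `!ptr.ptr` and
`index` types; the SSA names are made up for the example):

```mlir
%base = ptr.constant 0                 // !ptr.ptr in the default memory space
%off  = ptr.type_offset f32            // index-typed, target-specific offset of one f32
%p    = ptr.ptradd %base, %off         // advance the pointer by one f32 element
%v    = ptr.load %p : !ptr.ptr -> f32  // typed load through the opaque pointer
ptr.store %v, %base : f32, !ptr.ptr    // write the value back through %base
```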
---
mlir/include/mlir/Dialect/CMakeLists.txt | 1 +
mlir/include/mlir/Dialect/Ptr/CMakeLists.txt | 1 +
.../mlir/Dialect/Ptr/IR/CMakeLists.txt | 19 +
.../Dialect/Ptr/IR/MemorySpaceInterfaces.h | 180 +++++
.../Dialect/Ptr/IR/MemorySpaceInterfaces.td | 190 ++++++
mlir/include/mlir/Dialect/Ptr/IR/PtrAttrs.h | 20 +
mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.h | 20 +
.../include/mlir/Dialect/Ptr/IR/PtrDialect.td | 84 +++
mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td | 130 ++++
mlir/include/mlir/Dialect/Ptr/IR/PtrOps.h | 31 +
mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td | 405 +++++++++++
mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h | 37 +
mlir/include/mlir/IR/BuiltinTypeInterfaces.td | 25 +
mlir/include/mlir/InitAllDialects.h | 2 +
mlir/include/mlir/Target/LLVMIR/Dialect/All.h | 3 +
.../Dialect/Ptr/PtrToLLVMIRTranslation.h | 31 +
mlir/lib/Dialect/CMakeLists.txt | 1 +
mlir/lib/Dialect/Ptr/CMakeLists.txt | 1 +
mlir/lib/Dialect/Ptr/IR/CMakeLists.txt | 15 +
mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp | 635 ++++++++++++++++++
mlir/lib/IR/AsmPrinter.cpp | 5 +-
mlir/lib/Target/LLVMIR/CMakeLists.txt | 2 +
mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt | 1 +
.../Target/LLVMIR/Dialect/Ptr/CMakeLists.txt | 12 +
.../Dialect/Ptr/PtrToLLVMIRTranslation.cpp | 338 ++++++++++
mlir/lib/Target/LLVMIR/TypeToLLVM.cpp | 8 +-
26 files changed, 2195 insertions(+), 2 deletions(-)
create mode 100644 mlir/include/mlir/Dialect/Ptr/CMakeLists.txt
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/CMakeLists.txt
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/PtrAttrs.h
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.h
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/PtrOps.h
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td
create mode 100644 mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h
create mode 100644 mlir/include/mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h
create mode 100644 mlir/lib/Dialect/Ptr/CMakeLists.txt
create mode 100644 mlir/lib/Dialect/Ptr/IR/CMakeLists.txt
create mode 100644 mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp
create mode 100644 mlir/lib/Target/LLVMIR/Dialect/Ptr/CMakeLists.txt
create mode 100644 mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp
diff --git a/mlir/include/mlir/Dialect/CMakeLists.txt b/mlir/include/mlir/Dialect/CMakeLists.txt
index 1c4569ecfa5848..e01fc806d039d7 100644
--- a/mlir/include/mlir/Dialect/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/CMakeLists.txt
@@ -27,6 +27,7 @@ add_subdirectory(OpenACCMPCommon)
add_subdirectory(OpenMP)
add_subdirectory(PDL)
add_subdirectory(PDLInterp)
+add_subdirectory(Ptr)
add_subdirectory(Quant)
add_subdirectory(SCF)
add_subdirectory(Shape)
diff --git a/mlir/include/mlir/Dialect/Ptr/CMakeLists.txt b/mlir/include/mlir/Dialect/Ptr/CMakeLists.txt
new file mode 100644
index 00000000000000..f33061b2d87cff
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(IR)
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/CMakeLists.txt b/mlir/include/mlir/Dialect/Ptr/IR/CMakeLists.txt
new file mode 100644
index 00000000000000..86129ecef233de
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/CMakeLists.txt
@@ -0,0 +1,19 @@
+add_mlir_dialect(PtrOps ptr)
+add_mlir_doc(PtrOps PtrOps Dialects/ -gen-op-doc)
+
+set(LLVM_TARGET_DEFINITIONS MemorySpaceInterfaces.td)
+mlir_tablegen(MemorySpaceInterfaces.h.inc -gen-op-interface-decls)
+mlir_tablegen(MemorySpaceInterfaces.cpp.inc -gen-op-interface-defs)
+mlir_tablegen(MemorySpaceAttrInterfaces.h.inc -gen-attr-interface-decls)
+mlir_tablegen(MemorySpaceAttrInterfaces.cpp.inc -gen-attr-interface-defs)
+add_public_tablegen_target(MLIRPtrMemorySpaceInterfacesIncGen)
+
+set(LLVM_TARGET_DEFINITIONS PtrOps.td)
+mlir_tablegen(PtrOpsEnums.h.inc -gen-enum-decls)
+mlir_tablegen(PtrOpsEnums.cpp.inc -gen-enum-defs)
+add_public_tablegen_target(MLIRPtrOpsEnumsGen)
+
+set(LLVM_TARGET_DEFINITIONS PtrOps.td)
+mlir_tablegen(PtrOpsAttributes.h.inc -gen-attrdef-decls -attrdefs-dialect=ptr)
+mlir_tablegen(PtrOpsAttributes.cpp.inc -gen-attrdef-defs -attrdefs-dialect=ptr)
+add_public_tablegen_target(MLIRPtrOpsAttributesIncGen)
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h
new file mode 100644
index 00000000000000..b4c7bd847f5bf4
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h
@@ -0,0 +1,180 @@
+//===-- MemorySpaceInterfaces.h - Memory space interfaces ------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines memory space interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_PTR_IR_MEMORYSPACEINTERFACES_H
+#define MLIR_DIALECT_PTR_IR_MEMORYSPACEINTERFACES_H
+
+#include "mlir/IR/Attributes.h"
+
+namespace mlir {
+class Operation;
+class RewriterBase;
+struct MemorySlot;
+enum class DeletionKind : int32_t;
+namespace ptr {
+enum class AtomicBinOp : uint64_t;
+enum class AtomicOrdering : uint64_t;
+/// Verifies whether the target and source types are compatible with the
+/// `addrspacecast` op in the default memory space.
+/// Compatible types are:
+/// Vectors of rank 1, or scalars of `ptr` type.
+LogicalResult verifyPtrCastDefaultImpl(Operation *op, Type tgt, Type src);
+/// Returns whether the target and source types are compatible with the
+/// `ptrtoint` and `inttoptr` ops in the memory space.
+/// Compatible types are:
+/// IntLikeTy: Vectors of rank 1, or scalars of integer types or `index` type.
+/// PtrLikeTy: Vectors of rank 1, or scalars of `ptr` type.
+LogicalResult verifyIntCastTypesDefaultImpl(Operation *op, Type intLikeTy,
+ Type ptrLikeTy);
+/// Removes blocking uses of the store op for the `PromotableMemOpInterface`
+/// interface; the default implementation always deletes the op. For more
+/// information see `PromotableMemOpInterface` in
+/// `Interfaces/MemorySlotInterfaces`.
+DeletionKind removeStoreBlockingUsesDefaultImpl();
+
+/// Utility class for holding the atomic-related information of an operation.
+struct AtomicOpInfo {
+ AtomicOpInfo(Operation *op, Type valueType, IntegerAttr alignment,
+ StringAttr syncScope, AtomicOrdering ordering, bool volatile_)
+ : op(op), valueType(valueType), alignment(alignment),
+ syncScope(syncScope), ordering(ordering), volatile_(volatile_) {}
+ /// Atomic operation.
+ Operation *op;
+ /// Type of the value being acted on.
+ Type valueType;
+ /// Alignment of the operation.
+ IntegerAttr alignment;
+ /// Sync scope of the op.
+ StringAttr syncScope;
+ /// Atomic ordering of the op.
+ AtomicOrdering ordering;
+ /// Whether the atomic operation is volatile.
+ bool volatile_;
+};
+} // namespace ptr
+} // namespace mlir
+
+#include "mlir/Dialect/Ptr/IR/MemorySpaceAttrInterfaces.h.inc"
+
+namespace mlir {
+namespace ptr {
+/// This class wraps the `MemorySpaceAttrInterface` interface, providing a safe
+/// mechanism to specify the default behavior assumed by the ptr dialect.
+class MemorySpace {
+public:
+ MemorySpace() = default;
+ MemorySpace(std::nullptr_t) {}
+ MemorySpace(MemorySpaceAttrInterface memorySpace)
+ : memorySpace(memorySpace) {}
+ MemorySpace(Attribute memorySpace)
+ : memorySpace(dyn_cast_or_null<MemorySpaceAttrInterface>(memorySpace)) {}
+
+ /// Returns the underlying memory space.
+ MemorySpaceAttrInterface getUnderlyingSpace() const { return memorySpace; }
+
+ /// Returns true if the underlying memory space is null.
+ bool isDefaultModel() const { return memorySpace == nullptr; }
+
+ /// Returns the memory space as an integer, or 0 if using the default model.
+ unsigned getAddressSpace() const {
+ return memorySpace ? memorySpace.getAddressSpace() : 0;
+ }
+
+ /// Returns the default memory space as an attribute, or nullptr if using the
+ /// default model.
+ Attribute getDefaultMemorySpace() const {
+ return memorySpace ? memorySpace.getDefaultMemorySpace() : nullptr;
+ }
+
+ /// Returns whether a type is loadable in the memory space. The default model
+ /// assumes all types are loadable.
+ bool isLoadableType(Type type) const {
+ return memorySpace ? memorySpace.isLoadableType(type) : true;
+ }
+
+ /// Returns whether a type is storable in the memory space. The default model
+ /// assumes all types are storable.
+ bool isStorableType(Type type) const {
+ return memorySpace ? memorySpace.isStorableType(type) : true;
+ }
+
+ /// Verifies whether the atomic information of an operation is compatible with
+ /// the memory space. The default model assumes the op is compatible.
+ LogicalResult verifyCompatibleAtomicOp(
+ AtomicOpInfo atomicInfo,
+ ArrayRef<AtomicOrdering> unsupportedOrderings) const {
+ return memorySpace ? memorySpace.verifyCompatibleAtomicOp(
+ atomicInfo, unsupportedOrderings)
+ : success();
+ }
+
+ /// Verifies whether an `atomicrmw` op is semantically correct according to
+ /// the memory space. The default model assumes the op is compatible.
+ LogicalResult verifyAtomicRMW(AtomicOpInfo atomicInfo,
+ AtomicBinOp binOp) const {
+ return memorySpace ? memorySpace.verifyAtomicRMW(atomicInfo, binOp)
+ : success();
+ }
+
+ /// Verifies whether a `cmpxchg` op is semantically correct according to the
+ /// memory space. The default model assumes the op is compatible.
+ LogicalResult
+ verifyAtomicAtomicCmpXchg(AtomicOpInfo atomicInfo,
+ AtomicOrdering failureOrdering) const {
+ return memorySpace ? memorySpace.verifyAtomicAtomicCmpXchg(atomicInfo,
+ failureOrdering)
+ : success();
+ }
+
+ /// Verifies whether the target and source types are compatible with the
+ /// `addrspacecast` op in the memory space. Both types are expected to be
+ /// vectors of rank 1, or scalars of `ptr` type.
+ LogicalResult verifyPtrCast(Operation *op, Type tgt, Type src) const {
+ return memorySpace ? memorySpace.verifyPtrCast(op, tgt, src)
+ : verifyPtrCastDefaultImpl(op, tgt, src);
+ }
+
+ /// Verifies whether the types are compatible with the `ptrtoint` and
+ /// `inttoptr` ops in the memory space. The first type is expected to be
+ /// integer-like, while the second must be a ptr-like type.
+ LogicalResult verifyIntCastTypes(Operation *op, Type intLikeTy,
+ Type ptrLikeTy) const {
+ return memorySpace
+ ? memorySpace.verifyIntCastTypes(op, intLikeTy, ptrLikeTy)
+ : verifyIntCastTypesDefaultImpl(op, intLikeTy, ptrLikeTy);
+ }
+
+  /// Removes blocking uses of the store op for the `PromotableMemOpInterface`
+ /// interface. For more information see `PromotableMemOpInterface` in
+ /// `Interfaces/MemorySlotInterfaces`.
+ DeletionKind
+ removeStoreBlockingUses(Operation *storeOp, Value value,
+ const MemorySlot &slot,
+ const SmallPtrSetImpl<OpOperand *> &blockingUses,
+ RewriterBase &rewriter, Value reachingDefinition) {
+ return memorySpace
+ ? memorySpace.removeStoreBlockingUses(storeOp, value, slot,
+ blockingUses, rewriter,
+ reachingDefinition)
+ : removeStoreBlockingUsesDefaultImpl();
+ }
+
+protected:
+ /// Underlying memory space.
+ MemorySpaceAttrInterface memorySpace{};
+};
+} // namespace ptr
+} // namespace mlir
+
+#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h.inc"
+
+#endif // MLIR_DIALECT_PTR_IR_MEMORYSPACEINTERFACES_H
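
As a usage note on the wrapper above: a null `MemorySpaceAttrInterface` is a
valid state selecting the permissive default model, so both loads below verify
the same way (hedged sketch; the `1 : i32` space uses the plain
integer-attribute form accepted by `PtrType`, which does not implement the
interface and therefore also falls back to the default model):

```mlir
%a = ptr.load %p0 : !ptr.ptr -> f32           // no space attribute: default model
%b = ptr.load %p1 : !ptr.ptr<1 : i32> -> f32  // integer space: still the default model
```

An attribute that does implement `MemorySpaceAttrInterface` could instead
reject either load through `isLoadableType`.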
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td
new file mode 100644
index 00000000000000..b54e3d503ad5c7
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td
@@ -0,0 +1,190 @@
+//===-- MemorySpaceInterfaces.td - Memory space interfaces ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines memory space attribute interfaces.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTR_MEMORYSPACEINTERFACES
+#define PTR_MEMORYSPACEINTERFACES
+
+include "mlir/IR/AttrTypeBase.td"
+include "mlir/IR/OpBase.td"
+
+//===----------------------------------------------------------------------===//
+// Memory space attribute interface.
+//===----------------------------------------------------------------------===//
+
+def MemorySpaceAttrInterface : AttrInterface<"MemorySpaceAttrInterface"> {
+ let description = [{
+ This interface defines a common API for interacting with the memory model of
+ a memory space and the operations in the pointer dialect, giving proper
+    semantic meaning to the ops.
+
+ Furthermore, this interface allows concepts such as read-only memory to be
+ adequately modeled and enforced.
+ }];
+ let cppNamespace = "::mlir::ptr";
+ let methods = [
+ InterfaceMethod<
+ /*desc=*/ [{
+ Returns the dialect owning the memory model.
+ }],
+ /*returnType=*/ "Dialect*",
+ /*methodName=*/ "getModelOwner",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return nullptr; }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Returns the memory space as an integer, or 0 if using the default model.
+ }],
+ /*returnType=*/ "unsigned",
+ /*methodName=*/ "getAddressSpace",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return 0; }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Returns the default memory space as an attribute, or `nullptr` if using
+ the default model.
+ }],
+ /*returnType=*/ "Attribute",
+ /*methodName=*/ "getDefaultMemorySpace",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return nullptr; }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Returns whether a type is loadable in the memory space. The default model
+ assumes all types are loadable.
+ }],
+ /*returnType=*/ "bool",
+ /*methodName=*/ "isLoadableType",
+ /*args=*/ (ins "Type":$type),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return true; }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Returns whether a type is storable in the memory space. The default model
+ assumes all types are storable.
+ }],
+ /*returnType=*/ "bool",
+ /*methodName=*/ "isStorableType",
+ /*args=*/ (ins "Type":$type),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return true; }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Verifies whether the atomic information of an operation is compatible with
+ the memory space.
+      The default model assumes the op is compatible.
+ }],
+ /*returnType=*/ "LogicalResult",
+ /*methodName=*/ "verifyCompatibleAtomicOp",
+ /*args=*/ (ins "ptr::AtomicOpInfo":$atomicInfo,
+ "ArrayRef<ptr::AtomicOrdering>":$unsupportedOrdering),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return success(); }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Verifies whether an `atomicrmw` op is semantically correct according to
+ the memory space.
+ The default model assumes the op is compatible.
+ }],
+ /*returnType=*/ "LogicalResult",
+ /*methodName=*/ "verifyAtomicRMW",
+ /*args=*/ (ins "ptr::AtomicOpInfo":$atomicInfo,
+ "ptr::AtomicBinOp":$binOp),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return success(); }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Verifies whether a `cmpxchg` op is semantically correct according to the
+ memory space.
+ The default model assumes the op is compatible.
+ }],
+ /*returnType=*/ "LogicalResult",
+ /*methodName=*/ "verifyAtomicAtomicCmpXchg",
+ /*args=*/ (ins "ptr::AtomicOpInfo":$atomicInfo,
+ "ptr::AtomicOrdering":$failureOrdering),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return success(); }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Verifies whether the target and source types are compatible with the
+ `addrspacecast` op in the memory space.
+ Both types are expected to be vectors of rank 1, or scalars of `ptr`
+ type.
+ }],
+ /*returnType=*/ "LogicalResult",
+ /*methodName=*/ "verifyPtrCast",
+ /*args=*/ (ins "Operation*":$op, "Type":$tgt, "Type":$src),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return verifyPtrCastDefaultImpl(op, tgt, src); }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+ Verifies whether the types are compatible with the `ptrtoint` and
+ `inttoptr` ops in the memory space.
+      The first type is expected to be integer-like, while the second must be a
+ ptr-like type.
+ }],
+ /*returnType=*/ "LogicalResult",
+ /*methodName=*/ "verifyIntCastTypes",
+ /*args=*/ (ins "Operation*":$op, "Type":$intLikeTy, "Type":$ptrLikeTy),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return verifyIntCastTypesDefaultImpl(op, intLikeTy, ptrLikeTy); }]
+ >,
+ InterfaceMethod<
+ /*desc=*/ [{
+      Removes blocking uses of the store op for the `PromotableMemOpInterface`
+ interface. For more information see `PromotableMemOpInterface` in
+ `Interfaces/MemorySlotInterfaces`.
+ }],
+ /*returnType=*/ "DeletionKind",
+ /*methodName=*/ "removeStoreBlockingUses",
+ /*args=*/ (ins "Operation*":$storeOp,
+ "Value":$value,
+ "const MemorySlot &":$slot,
+ "const SmallPtrSetImpl<OpOperand *> &":$blockingUses,
+ "RewriterBase &":$rewriter,
+ "Value":$reachingDefinition),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{ return removeStoreBlockingUsesDefaultImpl(); }]
+ >,
+ ];
+}
+
+def MemorySpaceOpInterface : OpInterface<"MemorySpaceOpInterface"> {
+ let description = [{
+ An interface for operations with a memory space controlling their semantics.
+ }];
+
+ let cppNamespace = "::mlir::ptr";
+
+ let methods = [
+ InterfaceMethod<
+ /*desc=*/ "Returns the memory space interface controlling the op.",
+ /*returnType=*/ "MemorySpace",
+ /*methodName=*/ "getMemorySpace",
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{}]
+ >,
+ ];
+}
+#endif // PTR_MEMORYSPACEINTERFACES
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrAttrs.h b/mlir/include/mlir/Dialect/Ptr/IR/PtrAttrs.h
new file mode 100644
index 00000000000000..e6aa7635919f6d
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrAttrs.h
@@ -0,0 +1,20 @@
+//===- PtrAttrs.h - Pointer dialect attributes ------------------*- C++ -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Ptr dialect attributes.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_PTR_IR_PTRATTRS_H
+#define MLIR_DIALECT_PTR_IR_PTRATTRS_H
+
+#include "mlir/IR/OpImplementation.h"
+
+#include "mlir/Dialect/Ptr/IR/PtrOpsEnums.h.inc"
+
+#endif // MLIR_DIALECT_PTR_IR_PTRATTRS_H
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.h b/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.h
new file mode 100644
index 00000000000000..1e91ce60821d4b
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.h
@@ -0,0 +1,20 @@
+//===- PtrDialect.h - Pointer dialect ---------------------------*- C++ -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Ptr dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_PTR_IR_PTRDIALECT_H
+#define MLIR_DIALECT_PTR_IR_PTRDIALECT_H
+
+#include "mlir/IR/Dialect.h"
+
+#include "mlir/Dialect/Ptr/IR/PtrOpsDialect.h.inc"
+
+#endif // MLIR_DIALECT_PTR_IR_PTRDIALECT_H
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td
new file mode 100644
index 00000000000000..820e57b582627f
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrDialect.td
@@ -0,0 +1,84 @@
+//===- PtrDialect.td - Pointer dialect ---------------------*- tablegen -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTR_DIALECT
+#define PTR_DIALECT
+
+include "mlir/Interfaces/DataLayoutInterfaces.td"
+include "mlir/IR/AttrTypeBase.td"
+include "mlir/IR/BuiltinTypeInterfaces.td"
+include "mlir/IR/OpBase.td"
+
+//===----------------------------------------------------------------------===//
+// Pointer dialect definition.
+//===----------------------------------------------------------------------===//
+
+def Ptr_Dialect : Dialect {
+ let name = "ptr";
+ let summary = "Pointer dialect";
+ let cppNamespace = "::mlir::ptr";
+ let useDefaultTypePrinterParser = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// Pointer type definitions
+//===----------------------------------------------------------------------===//
+
+class Pointer_Type<string name, string typeMnemonic, list<Trait> traits = []>
+ : TypeDef<Ptr_Dialect, name, traits> {
+ let mnemonic = typeMnemonic;
+}
+
+def PtrType : Pointer_Type<"Ptr", "ptr", [
+ MemRefElementTypeInterface,
+ DeclareTypeInterfaceMethods<DataLayoutTypeInterface, [
+ "areCompatible", "verifyEntries"]>,
+ DeclareTypeInterfaceMethods<SharedDialectTypeInterface, [
+ "getSharedDialect"]>
+ ]> {
+ let summary = "Pointer type";
+ let description = [{
+ The `ptr` type is an opaque pointer type. This type typically represents
+ a reference to an object in memory. Pointers are optionally parameterized
+    by a memory space.
+
+    Syntax:
+
+ ```mlir
+ pointer ::= `ptr` (`<` memory-space `>`)?
+ memory-space ::= attribute-value
+ ```
+ }];
+ let parameters = (ins OptionalParameter<"Attribute">:$memorySpace);
+ let assemblyFormat = "(`<` $memorySpace^ `>`)?";
+ let skipDefaultBuilders = 1;
+ let builders = [
+ TypeBuilder<(ins CArg<"Attribute", "nullptr">:$addressSpace), [{
+ return $_get($_ctxt, addressSpace);
+ }]>,
+ TypeBuilder<(ins CArg<"unsigned">:$addressSpace), [{
+ return $_get($_ctxt, IntegerAttr::get(IntegerType::get($_ctxt, 32),
+ addressSpace));
+ }]>
+ ];
+ let extraClassDeclaration = [{
+ /// Returns the default memory space.
+ Attribute getDefaultMemorySpace() const;
+
+    /// Returns the address space as an integer.
+ int64_t getAddressSpace() const;
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// Base address operation definition.
+//===----------------------------------------------------------------------===//
+
+class Pointer_Op<string mnemonic, list<Trait> traits = []> :
+ Op<Ptr_Dialect, mnemonic, traits>;
+
+#endif // PTR_DIALECT
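
Concretely, the grammar above admits forms such as the following (the
attribute-valued space on the last line is purely hypothetical;
`#gpu.address_space<workgroup>` would only participate in verification if it
implemented `MemorySpaceAttrInterface`):

```mlir
!ptr.ptr                                 // default (unspecified) memory space
!ptr.ptr<1 : i32>                        // integer-attribute memory space
!ptr.ptr<#gpu.address_space<workgroup>>  // hypothetical attribute-valued space
```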
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td
new file mode 100644
index 00000000000000..ddc4fd1237b50a
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrEnums.td
@@ -0,0 +1,130 @@
+//===-- PtrEnums.td - Ptr dialect enum file ----------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTR_ENUMS
+#define PTR_ENUMS
+
+include "mlir/Dialect/Ptr/IR/PtrDialect.td"
+include "mlir/IR/EnumAttr.td"
+
+//===----------------------------------------------------------------------===//
+// Base classes for Ptr enum attributes.
+//===----------------------------------------------------------------------===//
+
+// Case of the LLVM enum attribute backed by I64Attr with customized string
+// representation that corresponds to what is visible in the textual IR form.
+// The parameters are as follows:
+// - `cppSym`: name of the C++ enumerant for this case in MLIR API;
+// - `irSym`: keyword used in the custom form of MLIR operation;
+// - `llvmSym`: name of the C++ enumerant for this case in LLVM API.
+// For example, `Ptr_EnumAttrCase<"Weak", "weak", "WeakAnyLinkage">` is usable
+// as `<MlirEnumName>::Weak` in MLIR API, `WeakAnyLinkage` in LLVM API and
+// is printed/parsed as `weak` in MLIR custom textual format.
+class Ptr_EnumAttrCase<string cppSym, string irSym, string llvmSym, int val> :
+ I64EnumAttrCase<cppSym, val, irSym> {
+ // The name of the equivalent enumerant in LLVM.
+ string llvmEnumerant = llvmSym;
+}
+
+// LLVM enum attribute backed by I64Attr with string representation
+// corresponding to what is visible in the textual IR form.
+// The parameters are as follows:
+// - `name`: name of the C++ enum class in MLIR API;
+// - `llvmName`: name of the C++ enum in LLVM API;
+// - `description`: textual description for documentation purposes;
+// - `cases`: list of enum cases;
+// - `unsupportedCases`: optional list of unsupported enum cases.
+// For example, `Ptr_EnumAttr<"Linkage", "::llvm::GlobalValue::LinkageTypes", ...>`
+// produces `mlir::ptr::Linkage` enum class in MLIR API that corresponds to (a
+// subset of) values in the `llvm::GlobalValue::LinkageTypes` in LLVM API.
+// All unsupported cases are excluded from the MLIR enum and trigger an error
+// during the import from LLVM IR. They are useful to handle sentinel values
+// such as `llvm::AtomicRMWInst::BinOp::BAD_BINOP` that LLVM commonly uses to
+// terminate its enums.
+class Ptr_EnumAttr<string name, string llvmName, string description,
+ list<Ptr_EnumAttrCase> cases,
+ list<Ptr_EnumAttrCase> unsupportedCases = []> :
+ I64EnumAttr<name, description, cases> {
+ // List of unsupported cases that have no conversion to an MLIR value.
+ list<Ptr_EnumAttrCase> unsupported = unsupportedCases;
+
+ // The equivalent enum class name in LLVM.
+ string llvmClassName = llvmName;
+}
+
+//===----------------------------------------------------------------------===//
+// Atomic binary op enum attribute
+//===----------------------------------------------------------------------===//
+
+def AtomicBinOpXchg : Ptr_EnumAttrCase<"xchg", "xchg", "Xchg", 0>;
+def AtomicBinOpAdd : Ptr_EnumAttrCase<"add", "add", "Add", 1>;
+def AtomicBinOpSub : Ptr_EnumAttrCase<"sub", "sub", "Sub", 2>;
+def AtomicBinOpAnd : Ptr_EnumAttrCase<"_and", "_and", "And", 3>;
+def AtomicBinOpNand : Ptr_EnumAttrCase<"nand", "nand", "Nand", 4>;
+def AtomicBinOpOr : Ptr_EnumAttrCase<"_or", "_or", "Or", 5>;
+def AtomicBinOpXor : Ptr_EnumAttrCase<"_xor", "_xor", "Xor", 6>;
+def AtomicBinOpMax : Ptr_EnumAttrCase<"max", "max", "Max", 7>;
+def AtomicBinOpMin : Ptr_EnumAttrCase<"min", "min", "Min", 8>;
+def AtomicBinOpUMax : Ptr_EnumAttrCase<"umax", "umax", "UMax", 9>;
+def AtomicBinOpUMin : Ptr_EnumAttrCase<"umin", "umin", "UMin", 10>;
+def AtomicBinOpFAdd : Ptr_EnumAttrCase<"fadd", "fadd", "FAdd", 11>;
+def AtomicBinOpFSub : Ptr_EnumAttrCase<"fsub", "fsub", "FSub", 12>;
+def AtomicBinOpFMax : Ptr_EnumAttrCase<"fmax", "fmax", "FMax", 13>;
+def AtomicBinOpFMin : Ptr_EnumAttrCase<"fmin", "fmin", "FMin", 14>;
+def AtomicBinOpUIncWrap : Ptr_EnumAttrCase<"uinc_wrap",
+ "uinc_wrap", "UIncWrap", 15>;
+def AtomicBinOpUDecWrap : Ptr_EnumAttrCase<"udec_wrap",
+ "udec_wrap", "UDecWrap", 16>;
+
+// A sentinel value that has no MLIR counterpart.
+def AtomicBadBinOp : Ptr_EnumAttrCase<"", "", "BAD_BINOP", 0>;
+
+def AtomicBinOp : Ptr_EnumAttr<
+ "AtomicBinOp",
+ "::llvm::AtomicRMWInst::BinOp",
+ "llvm.atomicrmw binary operations",
+ [AtomicBinOpXchg, AtomicBinOpAdd, AtomicBinOpSub, AtomicBinOpAnd,
+ AtomicBinOpNand, AtomicBinOpOr, AtomicBinOpXor, AtomicBinOpMax,
+ AtomicBinOpMin, AtomicBinOpUMax, AtomicBinOpUMin, AtomicBinOpFAdd,
+ AtomicBinOpFSub, AtomicBinOpFMax, AtomicBinOpFMin, AtomicBinOpUIncWrap,
+ AtomicBinOpUDecWrap],
+ [AtomicBadBinOp]> {
+ let cppNamespace = "::mlir::ptr";
+}
+
+//===----------------------------------------------------------------------===//
+// Atomic ordering enum attribute
+//===----------------------------------------------------------------------===//
+
+def AtomicOrderingNotAtomic : Ptr_EnumAttrCase<"not_atomic",
+ "not_atomic", "NotAtomic", 0>;
+def AtomicOrderingUnordered : Ptr_EnumAttrCase<"unordered",
+ "unordered", "Unordered", 1>;
+def AtomicOrderingMonotonic : Ptr_EnumAttrCase<"monotonic",
+ "monotonic", "Monotonic", 2>;
+def AtomicOrderingAcquire : Ptr_EnumAttrCase<"acquire",
+ "acquire", "Acquire", 4>;
+def AtomicOrderingRelease : Ptr_EnumAttrCase<"release",
+ "release", "Release", 5>;
+def AtomicOrderingAcquireRelease :
+ Ptr_EnumAttrCase<"acq_rel", "acq_rel", "AcquireRelease", 6>;
+def AtomicOrderingSequentiallyConsistent :
+ Ptr_EnumAttrCase<"seq_cst", "seq_cst", "SequentiallyConsistent", 7>;
+
+def AtomicOrdering : Ptr_EnumAttr<
+ "AtomicOrdering",
+ "::llvm::AtomicOrdering",
+ "Atomic ordering for LLVM's memory model",
+ [AtomicOrderingNotAtomic, AtomicOrderingUnordered, AtomicOrderingMonotonic,
+ AtomicOrderingAcquire, AtomicOrderingRelease, AtomicOrderingAcquireRelease,
+ AtomicOrderingSequentiallyConsistent
+ ]> {
+ let cppNamespace = "::mlir::ptr";
+}
+
+#endif // PTR_ENUMS
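
In the custom textual form these enumerants appear as bare keywords; a sketch
consistent with the op formats defined in `PtrOps.td` below:

```mlir
// bin_op `add` with ordering `acq_rel`.
%old = ptr.atomicrmw add %ptr, %val acq_rel : !ptr.ptr, i32
// success ordering `acq_rel`, failure ordering `monotonic`.
%res = ptr.cmpxchg %ptr, %cmp, %val acq_rel monotonic : !ptr.ptr, i32, i32
```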
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.h b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.h
new file mode 100644
index 00000000000000..933462d00c4695
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.h
@@ -0,0 +1,31 @@
+//===- PtrOps.h - Pointer dialect ops ---------------------------*- C++ -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the Ptr dialect operations.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_PTR_IR_PTROPS_H
+#define MLIR_DIALECT_PTR_IR_PTROPS_H
+
+#include "mlir/Bytecode/BytecodeOpInterface.h"
+#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
+#include "mlir/Dialect/Ptr/IR/PtrAttrs.h"
+#include "mlir/Dialect/Ptr/IR/PtrDialect.h"
+#include "mlir/Dialect/Ptr/IR/PtrTypes.h"
+#include "mlir/IR/OpDefinition.h"
+#include "mlir/Interfaces/MemorySlotInterfaces.h"
+#include "mlir/Interfaces/SideEffectInterfaces.h"
+
+#define GET_ATTRDEF_CLASSES
+#include "mlir/Dialect/Ptr/IR/PtrOpsAttributes.h.inc"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/Ptr/IR/PtrOps.h.inc"
+
+#endif // MLIR_DIALECT_PTR_IR_PTROPS_H
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td
new file mode 100644
index 00000000000000..b5af8152bbfb62
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrOps.td
@@ -0,0 +1,405 @@
+//===- PtrOps.td - Pointer dialect ops ---------------------*- tablegen -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef PTR_OPS
+#define PTR_OPS
+
+include "mlir/Dialect/Ptr/IR/PtrDialect.td"
+include "mlir/Dialect/Ptr/IR/PtrEnums.td"
+include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td"
+include "mlir/IR/OpAsmInterface.td"
+include "mlir/Interfaces/MemorySlotInterfaces.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
+include "mlir/IR/EnumAttr.td"
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWOp
+//===----------------------------------------------------------------------===//
+def PtrAtomicRMWType : AnyTypeOf<[AnyFloat, PtrType, AnyInteger]>;
+def Ptr_AtomicRMWOp : Pointer_Op<"atomicrmw", [
+ TypesMatchWith<"result #0 and operand #1 have the same type",
+ "val", "res", "$_self">,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Atomic read-modify-write operation";
+ let description = [{
+ The `atomicrmw` operation provides a way to perform a read-modify-write
+ sequence that is free from data races. The `bin_op` enumeration specifies
+ the modification to perform. The `val` operand represents the new value to
+ be applied during the modification. The `ptr` operand represents the
+ pointer that the read and write will be performed against.
+ The result represents the latest value that was stored.
+
+ Examples:
+ ```mlir
+    %old = ptr.atomicrmw volatile add %ptr, %val acq_rel : !ptr.ptr, i32
+ ```
+ }];
+ let arguments = (ins AtomicBinOp:$bin_op,
+ PtrType:$ptr,
+ PtrAtomicRMWType:$val,
+ AtomicOrdering:$ordering,
+ OptionalAttr<StrAttr>:$syncscope,
+ OptionalAttr<I64Attr>:$alignment,
+ UnitAttr:$volatile_);
+ let results = (outs PtrAtomicRMWType:$res);
+ let assemblyFormat = [{
+ (`volatile` $volatile_^)? $bin_op $ptr `,` $val
+ (`syncscope` `(` $syncscope^ `)`)? $ordering attr-dict `:`
+ qualified(type($ptr)) `,` type($val)
+ }];
+ let builders = [
+ OpBuilder<(ins "ptr::AtomicBinOp":$binOp, "Value":$ptr, "Value":$val,
+ "ptr::AtomicOrdering":$ordering,
+ CArg<"StringRef", "StringRef()">:$syncscope,
+ CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile
+ )>
+ ];
+ let hasVerifier = 1;
+ let extraClassDeclaration = [{
+ AtomicOpInfo getAtomicOpInfo() {
+ return AtomicOpInfo(*this, getVal().getType(), getAlignmentAttr(),
+ getSyncscopeAttr(), getOrdering(), getVolatile_());
+ }
+ SmallVector<Value> getAccessedOperands() {
+ return {getPtr()};
+ }
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicCmpXchgOp
+//===----------------------------------------------------------------------===//
+def Ptr_AtomicCmpXchgType : AnyTypeOf<[AnyInteger, PtrType]>;
+def Ptr_AtomicCmpXchgOp : Pointer_Op<"cmpxchg", [
+ TypesMatchWith<"operand #1 and operand #2 have the same type",
+ "val", "cmp", "$_self">,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Atomic compare exchange operation";
+ let description = [{
+    The `cmpxchg` operation is used to atomically modify memory. It loads a
+ value in memory and compares it to a given value. If they are equal, it
+ tries to store a new value into the memory.
+
+ Examples:
+ ```mlir
+    %res = ptr.cmpxchg %ptr, %cmp, %val acq_rel acq_rel : !ptr.ptr, i32, i32
+ ```
+ }];
+ let arguments = (ins PtrType:$ptr,
+ Ptr_AtomicCmpXchgType:$cmp,
+ Ptr_AtomicCmpXchgType:$val,
+ AtomicOrdering:$success_ordering,
+ AtomicOrdering:$failure_ordering,
+ OptionalAttr<StrAttr>:$syncscope,
+ OptionalAttr<I64Attr>:$alignment,
+ UnitAttr:$weak,
+ UnitAttr:$volatile_);
+ let results = (outs AnyType:$res);
+ let assemblyFormat = [{
+ (`weak` $weak^)? (`volatile` $volatile_^)? $ptr `,` $cmp `,` $val
+ (`syncscope` `(` $syncscope^ `)`)? $success_ordering $failure_ordering
+ attr-dict `:` qualified(type($ptr)) `,` type($val) `,` type($res)
+ }];
+ let builders = [
+ OpBuilder<(ins "Value":$ptr, "Value":$cmp, "Value":$val,
+ "ptr::AtomicOrdering":$successOrdering,
+ "ptr::AtomicOrdering":$failureOrdering,
+ CArg<"StringRef", "StringRef()">:$syncscope,
+ CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isWeak,
+ CArg<"bool", "false">:$isVolatile
+ )>
+ ];
+ let hasVerifier = 1;
+ let extraClassDeclaration = [{
+ AtomicOpInfo getAtomicOpInfo() {
+ return AtomicOpInfo(*this, getVal().getType(), getAlignmentAttr(),
+ getSyncscopeAttr(), getSuccessOrdering(), getVolatile_());
+ }
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// LoadOp
+//===----------------------------------------------------------------------===//
+def Ptr_LoadOp : Pointer_Op<"load", [
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+ DeclareOpInterfaceMethods<PromotableMemOpInterface>,
+ DeclareOpInterfaceMethods<SafeMemorySlotAccessOpInterface>,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Load operation";
+ let description = [{
+ The `load` operation is used to read from memory. A load may be marked as
+ atomic, volatile, and/or nontemporal, and takes a number of optional
+ attributes that specify aliasing information.
+
+ An atomic load only supports a limited set of pointer, integer, and
+ floating point types, and requires an explicit alignment.
+
+ Examples:
+ ```mlir
+ // A volatile load of a float variable.
+ %0 = ptr.load volatile %ptr : !ptr.ptr -> f32
+
+ // A nontemporal load of a float variable.
+ %0 = ptr.load %ptr {nontemporal} : !ptr.ptr -> f32
+
+ // An atomic load of an integer variable.
+ %0 = ptr.load %ptr atomic monotonic {alignment = 8 : i64}
+ : !ptr.ptr -> i64
+ ```
+ }];
+ let arguments = (ins PtrType:$addr,
+ OptionalAttr<I64Attr>:$alignment,
+ UnitAttr:$volatile_,
+ UnitAttr:$nontemporal,
+ DefaultValuedAttr<
+ AtomicOrdering, "ptr::AtomicOrdering::not_atomic">:$ordering,
+ OptionalAttr<StrAttr>:$syncscope);
+ let results = (outs AnyType:$res);
+ let assemblyFormat = [{
+ (`volatile` $volatile_^)? $addr
+ (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+ attr-dict `:` qualified(type($addr)) `->` type($res)
+ }];
+ let builders = [
+ OpBuilder<(ins "Type":$type, "Value":$addr,
+ CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
+ CArg<"bool", "false">:$isNonTemporal,
+ CArg<"ptr::AtomicOrdering", "ptr::AtomicOrdering::not_atomic">:$ordering,
+ CArg<"StringRef", "StringRef()">:$syncscope)>
+ ];
+ let hasVerifier = 1;
+ let extraClassDeclaration = [{
+ AtomicOpInfo getAtomicOpInfo() {
+ return AtomicOpInfo(*this, getRes().getType(), getAlignmentAttr(),
+ getSyncscopeAttr(), getOrdering(), getVolatile_());
+ }
+ SmallVector<Value> getAccessedOperands() {
+ return {getAddr()};
+ }
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// StoreOp
+//===----------------------------------------------------------------------===//
+def Ptr_StoreOp : Pointer_Op<"store", [
+ DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
+ DeclareOpInterfaceMethods<PromotableMemOpInterface>,
+ DeclareOpInterfaceMethods<SafeMemorySlotAccessOpInterface>,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Store operation";
+ let description = [{
+ The `store` operation is used to write to memory. A store may be marked as
+ atomic, volatile, and/or nontemporal, and takes a number of optional
+ attributes that specify aliasing information.
+
+ An atomic store only supports a limited set of pointer, integer, and
+ floating point types, and requires an explicit alignment.
+
+ Examples:
+ ```mlir
+ // A volatile store of a float variable.
+ ptr.store volatile %val, %ptr : f32, !ptr.ptr
+
+ // A nontemporal store of a float variable.
+ ptr.store %val, %ptr {nontemporal} : f32, !ptr.ptr
+
+ // An atomic store of an integer variable.
+ ptr.store %val, %ptr atomic monotonic {alignment = 8 : i64}
+ : i64, !ptr.ptr
+ ```
+ }];
+ let arguments = (ins AnyType:$value,
+ PtrType:$addr,
+ OptionalAttr<I64Attr>:$alignment,
+ UnitAttr:$volatile_,
+ UnitAttr:$nontemporal,
+ DefaultValuedAttr<
+ AtomicOrdering, "ptr::AtomicOrdering::not_atomic">:$ordering,
+ OptionalAttr<StrAttr>:$syncscope);
+ let assemblyFormat = [{
+ (`volatile` $volatile_^)? $value `,` $addr
+ (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
+ attr-dict `:` type($value) `,` qualified(type($addr))
+ }];
+ let builders = [
+ OpBuilder<(ins "Value":$value, "Value":$addr,
+ CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
+ CArg<"bool", "false">:$isNonTemporal,
+ CArg<"ptr::AtomicOrdering", "ptr::AtomicOrdering::not_atomic">:$ordering,
+ CArg<"StringRef", "StringRef()">:$syncscope)>
+ ];
+ let hasVerifier = 1;
+ let extraClassDeclaration = [{
+ AtomicOpInfo getAtomicOpInfo() {
+ return AtomicOpInfo(*this, getValue().getType(), getAlignmentAttr(),
+ getSyncscopeAttr(), getOrdering(), getVolatile_());
+ }
+ SmallVector<Value> getAccessedOperands() {
+ return {getAddr()};
+ }
+ }];
+}
+
+//===----------------------------------------------------------------------===//
+// AddrSpaceCastOp
+//===----------------------------------------------------------------------===//
+def Ptr_AddrSpaceCastOp : Pointer_Op<"addrspacecast", [
+ Pure,
+ DeclareOpInterfaceMethods<PromotableOpInterface>,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Address space cast operation";
+ let description = [{
+ The `addrspacecast` operation casts pointers between memory spaces.
+
+ Example:
+ ```mlir
+ %ptr = ptr.addrspacecast %addr : !ptr.ptr to !ptr.ptr<1 : i32>
+ ```
+ }];
+ let arguments = (ins AnyType:$arg);
+ let results = (outs AnyType:$res);
+ let assemblyFormat = "$arg attr-dict `:` type($arg) `to` type($res)";
+ let hasVerifier = 1;
+ let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// IntToPtrOp
+//===----------------------------------------------------------------------===//
+def Ptr_IntToPtrOp : Pointer_Op<"inttoptr", [
+ Pure,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Integer to a pointer operation";
+ let description = [{
+ The `inttoptr` operation casts an int or index value to a pointer.
+
+ Example:
+ ```mlir
+ %ptr = ptr.inttoptr %int : i32 to !ptr.ptr<1 : i32>
+ ```
+ }];
+ let arguments = (ins AnyType:$arg);
+ let results = (outs AnyType:$res);
+ let assemblyFormat = "$arg attr-dict `:` type($arg) `to` type($res)";
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// PtrToIntOp
+//===----------------------------------------------------------------------===//
+def Ptr_PtrToIntOp : Pointer_Op<"ptrtoint", [
+ Pure,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Pointer to an integer operation";
+ let description = [{
+ The `ptrtoint` operation casts a pointer value to an int or index.
+
+ Example:
+ ```mlir
+ %int = ptr.ptrtoint %ptr : !ptr.ptr<1 : i32> to i32
+ ```
+ }];
+ let arguments = (ins AnyType:$arg);
+ let results = (outs AnyType:$res);
+ let assemblyFormat = "$arg attr-dict `:` type($arg) `to` type($res)";
+ let hasVerifier = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantOp
+//===----------------------------------------------------------------------===//
+def Ptr_ConstantOp : Pointer_Op<"constant", [
+ ConstantLike, Pure,
+ DeclareOpInterfaceMethods<OpAsmOpInterface, ["getAsmResultNames"]>,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Pointer constant operation";
+ let description = [{
+ The `ptr.constant` operation produces a pointer-typed SSA value equal to
+ some index constant.
+
+ Example:
+
+ ```mlir
+    %ptr0 = ptr.constant 0
+    %ptr1 = ptr.constant 1 : !ptr.ptr<3 : i32>
+ ```
+ }];
+ let arguments = (ins IndexAttr:$value);
+ let results = (outs PtrType:$result);
+ let builders = [
+ OpBuilder<(ins "int64_t":$value, CArg<"Attribute", "nullptr">:$addressSpace)>
+ ];
+ let assemblyFormat = "attr-dict $value custom<PtrType>(type($result))";
+ let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// TypeOffsetOp
+//===----------------------------------------------------------------------===//
+def Ptr_TypeOffsetOp : Pointer_Op<"type_offset", [ConstantLike, Pure]> {
+ let summary = "Type offset constant operation";
+ let description = [{
+ The `type_offset` operation produces an int or index-typed SSA value
+ equal to a target-specific constant representing the offset of a single
+    element of the given type. The default return type is `index`.
+
+    Example:
+
+ ```mlir
+ %0 = ptr.type_offset f32
+ %1 = ptr.type_offset memref<12 x f64> : i32
+ ```
+ }];
+
+ let arguments = (ins TypeAttr:$baseType);
+ let results = (outs AnySignlessIntegerOrIndex:$result);
+ let builders = [
+ OpBuilder<(ins "TypeAttr":$baseType, CArg<"Type", "nullptr">:$resultTy)>
+ ];
+ let assemblyFormat = [{
+ attr-dict $baseType custom<IntType>(type($result))
+ }];
+ let hasFolder = 1;
+}
+
+//===----------------------------------------------------------------------===//
+// PtrAddOp
+//===----------------------------------------------------------------------===//
+def Ptr_PtrAddOp : Pointer_Op<"ptradd", [
+ Pure, AllTypesMatch<["base", "result"]>,
+ DeclareOpInterfaceMethods<MemorySpaceOpInterface>
+ ]> {
+ let summary = "Pointer-index add operation";
+ let description = [{
+    The `ptradd` operation adds a pointer and an integer or index to
+    produce a new address.
+
+ Example:
+ ```mlir
+    %res = ptr.ptradd %addr : !ptr.ptr<3 : i32>, %c10 : i32
+ ```
+ }];
+
+ let arguments = (ins PtrType:$base, AnySignlessIntegerOrIndex:$offset);
+ let results = (outs PtrType:$result);
+ let assemblyFormat = [{
+ $base custom<PtrType>(type($base)) `,` $offset
+ custom<IntType>(type($offset)) attr-dict
+ }];
+}
+
+#endif // PTR_OPS
diff --git a/mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h b/mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h
new file mode 100644
index 00000000000000..f32c489bae596c
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Ptr/IR/PtrTypes.h
@@ -0,0 +1,37 @@
+//===- PtrTypes.h - Pointer types -------------------------------*- C++ -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the Pointer dialect types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_DIALECT_PTR_IR_PTRTYPES_H
+#define MLIR_DIALECT_PTR_IR_PTRTYPES_H
+
+#include "mlir/IR/Types.h"
+#include "mlir/Interfaces/DataLayoutInterfaces.h"
+
+namespace mlir {
+namespace ptr {
+/// The positions of different values in the data layout entry for pointers.
+enum class PtrDLEntryPos { Size = 0, Abi = 1, Preferred = 2, Index = 3 };
+
+/// Returns the value that corresponds to named position `pos` from the
+/// data layout entry `attr` assuming it's a dense integer elements attribute.
+/// Returns `std::nullopt` if `pos` is not present in the entry.
+/// Currently only `PtrDLEntryPos::Index` is optional, and all other positions
+/// may be assumed to be present.
+std::optional<uint64_t> extractPointerSpecValue(Attribute attr,
+ PtrDLEntryPos pos);
+} // namespace ptr
+} // namespace mlir
+
+#define GET_TYPEDEF_CLASSES
+#include "mlir/Dialect/Ptr/IR/PtrOpsTypes.h.inc"
+
+#endif // MLIR_DIALECT_PTR_IR_PTRTYPES_H
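
For reference, `PtrDLEntryPos` indexes the positions of a dense integer
elements data layout entry laid out as `[size, abi, preferred(, index)]`, in
bits. A hedged sketch of such an entry, reusing the `dlti` syntax and omitting
the optional index position:

```mlir
// extractPointerSpecValue(entry, PtrDLEntryPos::Index) == std::nullopt here.
module attributes { dlti.dl_spec = #dlti.dl_spec<
  #dlti.dl_entry<!ptr.ptr, dense<[32, 32, 64]> : vector<3xi32>>
> } {}
```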
diff --git a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
index db38e2e1bce22a..d62ce5b4e6f71d 100644
--- a/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
+++ b/mlir/include/mlir/IR/BuiltinTypeInterfaces.td
@@ -188,4 +188,29 @@ def ShapedTypeInterface : TypeInterface<"ShapedType"> {
}];
}
+//===----------------------------------------------------------------------===//
+// SharedDialectTypeInterface
+//===----------------------------------------------------------------------===//
+
+def SharedDialectTypeInterface : TypeInterface<"SharedDialectTypeInterface"> {
+ let cppNamespace = "::mlir";
+ let description = [{
+    This interface allows a type to be shared between dialects, delegating
+    custom parsing and printing of the type to a foreign dialect.
+ If a dialect has a type with this interface, then it must implement the
+ dialect methods `parseType` and `printType`.
+ }];
+ let methods = [
+ InterfaceMethod<[{
+ Returns the dialect responsible for printing and parsing the type
+ instance.
+ }],
+ "Dialect&", "getSharedDialect", (ins),
+ [{}],
+ [{ return $_type.getDialect(); }]
+ >
+ ];
+}
+
+
#endif // MLIR_IR_BUILTINTYPEINTERFACES_TD_
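
Presumably this interface is what lets the `LLVMPointerType` to `ptr::PtrType`
replacement keep the existing textual form: a `PtrType` whose memory-space
attribute is owned by the LLVM dialect can return that dialect from
`getSharedDialect` and keep printing/parsing as `!llvm.ptr` (illustrative
sketch only):

```mlir
// Round-trips unchanged even though the storage class is ptr::PtrType.
llvm.func @f(%arg0: !llvm.ptr)
```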
diff --git a/mlir/include/mlir/InitAllDialects.h b/mlir/include/mlir/InitAllDialects.h
index 19a62cadaa2e04..83443cd82f8e59 100644
--- a/mlir/include/mlir/InitAllDialects.h
+++ b/mlir/include/mlir/InitAllDialects.h
@@ -60,6 +60,7 @@
#include "mlir/Dialect/OpenMP/OpenMPDialect.h"
#include "mlir/Dialect/PDL/IR/PDL.h"
#include "mlir/Dialect/PDLInterp/IR/PDLInterp.h"
+#include "mlir/Dialect/Ptr/IR/PtrDialect.h"
#include "mlir/Dialect/Quant/QuantOps.h"
#include "mlir/Dialect/SCF/IR/SCF.h"
#include "mlir/Dialect/SCF/IR/ValueBoundsOpInterfaceImpl.h"
@@ -127,6 +128,7 @@ inline void registerAllDialects(DialectRegistry ®istry) {
omp::OpenMPDialect,
pdl::PDLDialect,
pdl_interp::PDLInterpDialect,
+ ptr::PtrDialect,
quant::QuantizationDialect,
ROCDL::ROCDLDialect,
scf::SCFDialect,
diff --git a/mlir/include/mlir/Target/LLVMIR/Dialect/All.h b/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
index 0b37e23e45118b..60fbc24afa186d 100644
--- a/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
+++ b/mlir/include/mlir/Target/LLVMIR/Dialect/All.h
@@ -26,6 +26,7 @@
#include "mlir/Target/LLVMIR/Dialect/NVVM/NVVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h"
+#include "mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/ROCDL/ROCDLToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/SPIRV/SPIRVToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/X86Vector/X86VectorToLLVMIRTranslation.h"
@@ -46,6 +47,7 @@ static inline void registerAllToLLVMIRTranslations(DialectRegistry ®istry) {
registerNVVMDialectTranslation(registry);
registerOpenACCDialectTranslation(registry);
registerOpenMPDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
registerROCDLDialectTranslation(registry);
registerSPIRVDialectTranslation(registry);
registerX86VectorDialectTranslation(registry);
@@ -63,6 +65,7 @@ registerAllGPUToLLVMIRTranslations(DialectRegistry ®istry) {
registerGPUDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
registerNVVMDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
registerROCDLDialectTranslation(registry);
registerSPIRVDialectTranslation(registry);
diff --git a/mlir/include/mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h b/mlir/include/mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h
new file mode 100644
index 00000000000000..5dc1b0e45995ca
--- /dev/null
+++ b/mlir/include/mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h
@@ -0,0 +1,31 @@
+//===- PtrToLLVMIRTranslation.h - Ptr Dialect to LLVM IR --------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides registration calls for Ptr dialect to LLVM IR translation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MLIR_TARGET_LLVMIR_DIALECT_PTR_PTRTOLLVMIRTRANSLATION_H
+#define MLIR_TARGET_LLVMIR_DIALECT_PTR_PTRTOLLVMIRTRANSLATION_H
+
+namespace mlir {
+
+class DialectRegistry;
+class MLIRContext;
+
+/// Register the Ptr dialect and the translation from it to LLVM IR in
+/// the given registry.
+void registerPtrDialectTranslation(DialectRegistry ®istry);
+
+/// Register the Ptr dialect and the translation from it in the registry
+/// associated with the given context.
+void registerPtrDialectTranslation(MLIRContext &context);
+
+} // namespace mlir
+
+#endif // MLIR_TARGET_LLVMIR_DIALECT_PTR_PTRTOLLVMIRTRANSLATION_H
diff --git a/mlir/lib/Dialect/CMakeLists.txt b/mlir/lib/Dialect/CMakeLists.txt
index 68776a695cac4d..281e85df3971c5 100644
--- a/mlir/lib/Dialect/CMakeLists.txt
+++ b/mlir/lib/Dialect/CMakeLists.txt
@@ -27,6 +27,7 @@ add_subdirectory(OpenACCMPCommon)
add_subdirectory(OpenMP)
add_subdirectory(PDL)
add_subdirectory(PDLInterp)
+add_subdirectory(Ptr)
add_subdirectory(Quant)
add_subdirectory(SCF)
add_subdirectory(Shape)
diff --git a/mlir/lib/Dialect/Ptr/CMakeLists.txt b/mlir/lib/Dialect/Ptr/CMakeLists.txt
new file mode 100644
index 00000000000000..f33061b2d87cff
--- /dev/null
+++ b/mlir/lib/Dialect/Ptr/CMakeLists.txt
@@ -0,0 +1 @@
+add_subdirectory(IR)
diff --git a/mlir/lib/Dialect/Ptr/IR/CMakeLists.txt b/mlir/lib/Dialect/Ptr/IR/CMakeLists.txt
new file mode 100644
index 00000000000000..4268bb29493192
--- /dev/null
+++ b/mlir/lib/Dialect/Ptr/IR/CMakeLists.txt
@@ -0,0 +1,15 @@
+add_mlir_dialect_library(
+ MLIRPtrDialect
+ PtrDialect.cpp
+ ADDITIONAL_HEADER_DIRS
+  ${PROJECT_SOURCE_DIR}/mlir/Dialect/Ptr
+ DEPENDS
+ MLIRPtrOpsIncGen
+ MLIRPtrOpsEnumsGen
+ MLIRPtrOpsAttributesIncGen
+ MLIRPtrMemorySpaceInterfacesIncGen
+ LINK_LIBS
+ PUBLIC
+ MLIRIR
+ MLIRDataLayoutInterfaces
+)
diff --git a/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp b/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp
new file mode 100644
index 00000000000000..88d89abfc81770
--- /dev/null
+++ b/mlir/lib/Dialect/Ptr/IR/PtrDialect.cpp
@@ -0,0 +1,635 @@
+//===- PtrDialect.cpp - Pointer dialect ---------------------*- C++ -*-===//
+//
+// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the Pointer dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#include "mlir/Dialect/Ptr/IR/PtrOps.h"
+#include "mlir/IR/DialectImplementation.h"
+#include "mlir/IR/PatternMatch.h"
+#include "mlir/Transforms/InliningUtils.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/TypeSwitch.h"
+
+using namespace mlir;
+using namespace mlir::ptr;
+
+//===----------------------------------------------------------------------===//
+// Pointer dialect
+//===----------------------------------------------------------------------===//
+namespace {
+/// This class defines the interface for handling inlining for ptr
+/// dialect operations.
+struct PtrInlinerInterface : public DialectInlinerInterface {
+ using DialectInlinerInterface::DialectInlinerInterface;
+
+ /// All ptr dialect ops can be inlined.
+ bool isLegalToInline(Operation *, Region *, bool, IRMapping &) const final {
+ return true;
+ }
+};
+} // namespace
+
+void PtrDialect::initialize() {
+ addOperations<
+#define GET_OP_LIST
+#include "mlir/Dialect/Ptr/IR/PtrOps.cpp.inc"
+ >();
+ addTypes<
+#define GET_TYPEDEF_LIST
+#include "mlir/Dialect/Ptr/IR/PtrOpsTypes.cpp.inc"
+ >();
+ addInterfaces<PtrInlinerInterface>();
+}
+
+// Returns the underlying ptr-type or null.
+static PtrType getUnderlyingPtrType(Type ty) {
+ Type elemTy = ty;
+ if (auto vecTy = dyn_cast<VectorType>(ty))
+ elemTy = vecTy.getElementType();
+ return dyn_cast<PtrType>(elemTy);
+}
+
+// Returns a pair containing:
+// - The element type of a vector, or the type itself if it's not a vector.
+// - The number of elements if the type is a rank-1 vector, 0 if it's a
+//   scalar, or a negative error code if the vector type is unsupported.
+static std::pair<Type, int64_t> getVecOrScalarInfo(Type ty) {
+ if (auto vecTy = dyn_cast<VectorType>(ty)) {
+ auto elemTy = vecTy.getElementType();
+ // Vectors of rank greater than one or with scalable dimensions are not
+ // supported.
+    if (vecTy.getRank() != 1)
+      return {elemTy, -1};
+    if (vecTy.getScalableDims()[0])
+      return {elemTy, -2};
+ return {elemTy, vecTy.getShape()[0]};
+ }
+ // `ty` is a scalar type.
+ return {ty, 0};
+}
+
+LogicalResult mlir::ptr::verifyPtrCastDefaultImpl(Operation *op, Type tgt,
+ Type src) {
+ std::pair<Type, int64_t> tgtInfo = getVecOrScalarInfo(tgt);
+ std::pair<Type, int64_t> srcInfo = getVecOrScalarInfo(src);
+ if (!isa<PtrType>(tgtInfo.first) || !isa<PtrType>(srcInfo.first))
+ return op->emitError() << "invalid ptr-like operand";
+ // Check shape validity.
+ if (tgtInfo.second == -1 || srcInfo.second == -1)
+ return op->emitError() << "vectors of rank != 1 are not supported";
+ if (tgtInfo.second == -2 || srcInfo.second == -2)
+ return op->emitError()
+ << "vectors with scalable dimensions are not supported";
+ if (tgtInfo.second != srcInfo.second)
+ return op->emitError() << "incompatible operand shapes";
+ return success();
+}
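To make the default verifier's checks concrete, here is a sketch in MLIR assembly. The `ptr.addrspacecast` spelling and the `!ptr.ptr<...>` shorthand are assumptions on my part, since the op and type assembly formats are defined in PtrOps.td/PtrDialect.td rather than in this file:

```mlir
// Scalars and matching 1-D fixed-size vectors of pointers verify.
%a = ptr.addrspacecast %p : !ptr.ptr<1> to !ptr.ptr
%b = ptr.addrspacecast %v : vector<4x!ptr.ptr<1>> to vector<4x!ptr.ptr>
// Rejected with "incompatible operand shapes" (4 vs. 8 elements):
// %c = ptr.addrspacecast %v : vector<4x!ptr.ptr<1>> to vector<8x!ptr.ptr>
// Rejected: vectors of rank != 1 and scalable vectors are unsupported.
// %d = ptr.addrspacecast %m : vector<2x2x!ptr.ptr<1>> to vector<2x2x!ptr.ptr>
```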
+
+LogicalResult mlir::ptr::verifyIntCastTypesDefaultImpl(Operation *op,
+ Type intLikeTy,
+ Type ptrLikeTy) {
+ // Check int-like type.
+ std::pair<Type, int64_t> intInfo = getVecOrScalarInfo(intLikeTy);
+ if (!intInfo.first.isSignlessIntOrIndex())
+ return op->emitError() << "invalid int-like type";
+ // Check ptr-like type.
+ std::pair<Type, int64_t> ptrInfo = getVecOrScalarInfo(ptrLikeTy);
+ if (!isa<PtrType>(ptrInfo.first))
+ return op->emitError() << "invalid ptr-like type";
+ // Check shape validity.
+ if (intInfo.second == -1 || ptrInfo.second == -1)
+ return op->emitError() << "vectors of rank != 1 are not supported";
+ if (intInfo.second == -2 || ptrInfo.second == -2)
+ return op->emitError()
+ << "vectors with scalable dimensions are not supported";
+ if (intInfo.second != ptrInfo.second)
+ return op->emitError() << "incompatible operand shapes";
+ return success();
+}
+
+DeletionKind mlir::ptr::removeStoreBlockingUsesDefaultImpl() {
+ return DeletionKind::Delete;
+}
+
+//===----------------------------------------------------------------------===//
+// Pointer type
+//===----------------------------------------------------------------------===//
+
+static constexpr unsigned kDefaultPointerSizeBits = 64;
+static constexpr unsigned kBitsInByte = 8;
+static constexpr unsigned kDefaultPointerAlignment = 8;
+
+int64_t PtrType::getAddressSpace() const {
+  if (auto intAttr = llvm::dyn_cast_or_null<IntegerAttr>(getMemorySpace()))
+    return intAttr.getInt();
+  if (auto ms = llvm::dyn_cast_or_null<MemorySpaceAttrInterface>(
+          getMemorySpace()))
+    return ms.getAddressSpace();
+  return 0;
+}
+
+Dialect &PtrType::getSharedDialect() const {
+ if (auto memSpace =
+ llvm::dyn_cast_or_null<MemorySpaceAttrInterface>(getMemorySpace());
+ memSpace && memSpace.getModelOwner())
+ return *memSpace.getModelOwner();
+ return getDialect();
+}
+
+Attribute PtrType::getDefaultMemorySpace() const {
+ if (auto ms =
+ llvm::dyn_cast_or_null<MemorySpaceAttrInterface>(getMemorySpace()))
+ return ms.getDefaultMemorySpace();
+ return nullptr;
+}
+
+std::optional<uint64_t> mlir::ptr::extractPointerSpecValue(Attribute attr,
+ PtrDLEntryPos pos) {
+ auto spec = cast<DenseIntElementsAttr>(attr);
+ auto idx = static_cast<int64_t>(pos);
+ if (idx >= spec.size())
+ return std::nullopt;
+ return spec.getValues<uint64_t>()[idx];
+}
+
+/// Returns the part of the data layout entry that corresponds to `pos` for
+/// the given `type` by interpreting the list of entries `params`. For a
+/// pointer in the default address space, returns the default value if the
+/// entries do not provide a custom one; for other address spaces, returns
+/// std::nullopt.
+static std::optional<unsigned>
+getPointerDataLayoutEntry(DataLayoutEntryListRef params, PtrType type,
+ PtrDLEntryPos pos) {
+ // First, look for the entry for the pointer in the current address space.
+ Attribute currentEntry;
+ for (DataLayoutEntryInterface entry : params) {
+ if (!entry.isTypeEntry())
+ continue;
+ if (llvm::cast<PtrType>(entry.getKey().get<Type>()).getMemorySpace() ==
+ type.getMemorySpace()) {
+ currentEntry = entry.getValue();
+ break;
+ }
+ }
+ if (currentEntry) {
+ return *extractPointerSpecValue(currentEntry, pos) /
+ (pos == PtrDLEntryPos::Size ? 1 : kBitsInByte);
+ }
+
+  // If no entry was found and this is a pointer to the default memory space,
+  // assume the default 64-bit pointer size and alignment.
+ if (type.getAddressSpace() == 0) {
+ return pos == PtrDLEntryPos::Size ? kDefaultPointerSizeBits
+ : kDefaultPointerAlignment;
+ }
+
+ return std::nullopt;
+}
+
+llvm::TypeSize PtrType::getTypeSizeInBits(const DataLayout &dataLayout,
+ DataLayoutEntryListRef params) const {
+ if (std::optional<uint64_t> size =
+ getPointerDataLayoutEntry(params, *this, PtrDLEntryPos::Size))
+ return llvm::TypeSize::getFixed(*size);
+
+ // For other memory spaces, use the size of the pointer to the default memory
+ // space.
+ return dataLayout.getTypeSizeInBits(
+ get(getContext(), getDefaultMemorySpace()));
+}
+
+uint64_t PtrType::getABIAlignment(const DataLayout &dataLayout,
+ DataLayoutEntryListRef params) const {
+ if (std::optional<uint64_t> alignment =
+ getPointerDataLayoutEntry(params, *this, PtrDLEntryPos::Abi))
+ return *alignment;
+
+ return dataLayout.getTypeABIAlignment(
+ get(getContext(), getDefaultMemorySpace()));
+}
+
+uint64_t PtrType::getPreferredAlignment(const DataLayout &dataLayout,
+ DataLayoutEntryListRef params) const {
+ if (std::optional<uint64_t> alignment =
+ getPointerDataLayoutEntry(params, *this, PtrDLEntryPos::Preferred))
+ return *alignment;
+
+ return dataLayout.getTypePreferredAlignment(
+ get(getContext(), getDefaultMemorySpace()));
+}
+
+bool PtrType::areCompatible(DataLayoutEntryListRef oldLayout,
+ DataLayoutEntryListRef newLayout) const {
+ for (DataLayoutEntryInterface newEntry : newLayout) {
+ if (!newEntry.isTypeEntry())
+ continue;
+ unsigned size = kDefaultPointerSizeBits;
+ unsigned abi = kDefaultPointerAlignment;
+ auto newType = llvm::cast<PtrType>(newEntry.getKey().get<Type>());
+ const auto *it =
+ llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
+ if (auto type = llvm::dyn_cast_if_present<Type>(entry.getKey())) {
+ return llvm::cast<PtrType>(type).getMemorySpace() ==
+ newType.getMemorySpace();
+ }
+ return false;
+ });
+    // Fall back to the entry for the default address space if none matched.
+    if (it == oldLayout.end()) {
+      it = llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
+        if (auto type = llvm::dyn_cast_if_present<Type>(entry.getKey())) {
+          return llvm::cast<PtrType>(type).getAddressSpace() == 0;
+        }
+        return false;
+      });
+    }
+ if (it != oldLayout.end()) {
+ size = *extractPointerSpecValue(*it, PtrDLEntryPos::Size);
+ abi = *extractPointerSpecValue(*it, PtrDLEntryPos::Abi);
+ }
+
+ Attribute newSpec = llvm::cast<DenseIntElementsAttr>(newEntry.getValue());
+ unsigned newSize = *extractPointerSpecValue(newSpec, PtrDLEntryPos::Size);
+ unsigned newAbi = *extractPointerSpecValue(newSpec, PtrDLEntryPos::Abi);
+ if (size != newSize || abi < newAbi || abi % newAbi != 0)
+ return false;
+ }
+ return true;
+}
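For intuition, a new layout may tighten the ABI alignment only to a divisor of the old one while keeping the size unchanged. A sketch, assuming `!ptr.ptr` reuses the `[size, abi, preferred]` bit-width entry layout that `!llvm.ptr` uses today:

```mlir
// Old entry: 64-bit pointers, 64-bit ABI alignment.
//   #dlti.dl_entry<!ptr.ptr, dense<[64, 64, 64]> : vector<3xi64>>
// Compatible new entry: abi 32 divides 64 and the size is unchanged.
//   #dlti.dl_entry<!ptr.ptr, dense<[64, 32, 64]> : vector<3xi64>>
// Incompatible new entry: the pointer size changed.
//   #dlti.dl_entry<!ptr.ptr, dense<[32, 32, 64]> : vector<3xi64>>
```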
+
+LogicalResult PtrType::verifyEntries(DataLayoutEntryListRef entries,
+ Location loc) const {
+ for (DataLayoutEntryInterface entry : entries) {
+ if (!entry.isTypeEntry())
+ continue;
+ auto key = entry.getKey().get<Type>();
+ auto values = llvm::dyn_cast<DenseIntElementsAttr>(entry.getValue());
+ if (!values || (values.size() != 3 && values.size() != 4)) {
+ return emitError(loc)
+ << "expected layout attribute for " << key
+ << " to be a dense integer elements attribute with 3 or 4 "
+ "elements";
+ }
+ if (!values.getElementType().isInteger(64))
+ return emitError(loc) << "expected i64 parameters for " << key;
+
+ if (extractPointerSpecValue(values, PtrDLEntryPos::Abi) >
+ extractPointerSpecValue(values, PtrDLEntryPos::Preferred)) {
+ return emitError(loc) << "preferred alignment is expected to be at least "
+ "as large as ABI alignment";
+ }
+ }
+ return success();
+}
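As a sketch of what `verifyEntries` accepts, a data layout specification could look as follows, again assuming the `!llvm.ptr`-style entry layout; the verifier requires 3 or 4 i64 elements with the ABI alignment no larger than the preferred alignment:

```mlir
// 64-bit pointers, 64-bit ABI and preferred alignment (all values in bits).
module attributes { dlti.dl_spec = #dlti.dl_spec<
  #dlti.dl_entry<!ptr.ptr, dense<[64, 64, 64]> : vector<3xi64>>
>} {}
```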
+
+//===----------------------------------------------------------------------===//
+// Pointer operations.
+//===----------------------------------------------------------------------===//
+namespace {
+ParseResult parsePtrType(OpAsmParser &parser, Type &ty) {
+ if (succeeded(parser.parseOptionalColon()) && parser.parseType(ty))
+ return parser.emitError(parser.getNameLoc(), "expected a type");
+ if (!ty)
+ ty = parser.getBuilder().getType<PtrType>();
+ return success();
+}
+void printPtrType(OpAsmPrinter &p, Operation *op, PtrType ty) {
+ if (ty.getMemorySpace() != nullptr)
+ p << " : " << ty;
+}
+
+ParseResult parseIntType(OpAsmParser &parser, Type &ty) {
+ if (succeeded(parser.parseOptionalColon()) && parser.parseType(ty))
+ return parser.emitError(parser.getNameLoc(), "expected a type");
+ if (!ty)
+ ty = parser.getBuilder().getIndexType();
+ return success();
+}
+void printIntType(OpAsmPrinter &p, Operation *op, Type ty) {
+ if (!ty.isIndex())
+ p << " : " << ty;
+}
+} // namespace
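These helpers elide the trailing type when it equals the default. A hypothetical round-trip for an op using `custom<PtrType>` in its assembly format (op syntax assumed for illustration):

```mlir
// Null memory space: the ": !ptr.ptr" suffix is elided when printing.
%p0 = ptr.constant 0
// Non-null memory space: the type is printed explicitly.
%p1 = ptr.constant 0 : !ptr.ptr<3>
```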
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWOp
+//===----------------------------------------------------------------------===//
+LogicalResult AtomicRMWOp::verify() {
+ return getMemorySpace().verifyAtomicRMW(getAtomicOpInfo(), getBinOp());
+}
+
+MemorySpace AtomicRMWOp::getMemorySpace() {
+ return MemorySpace(getPtr().getType().getMemorySpace());
+}
+
+void AtomicRMWOp::build(OpBuilder &builder, OperationState &state,
+ AtomicBinOp binOp, Value ptr, Value val,
+ AtomicOrdering ordering, StringRef syncscope,
+ unsigned alignment, bool isVolatile) {
+ build(builder, state, val.getType(), binOp, ptr, val, ordering,
+ !syncscope.empty() ? builder.getStringAttr(syncscope) : nullptr,
+ alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile);
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicCmpXchgOp
+//===----------------------------------------------------------------------===//
+LogicalResult AtomicCmpXchgOp::verify() {
+ return getMemorySpace().verifyAtomicAtomicCmpXchg(getAtomicOpInfo(),
+ getFailureOrdering());
+}
+
+MemorySpace AtomicCmpXchgOp::getMemorySpace() {
+ return MemorySpace(getPtr().getType().getMemorySpace());
+}
+
+//===----------------------------------------------------------------------===//
+// LoadOp
+//===----------------------------------------------------------------------===//
+bool ptr::LoadOp::loadsFrom(const MemorySlot &slot) {
+ return getAddr() == slot.ptr;
+}
+
+bool ptr::LoadOp::storesTo(const MemorySlot &slot) { return false; }
+
+Value ptr::LoadOp::getStored(const MemorySlot &slot, RewriterBase &rewriter) {
+ llvm_unreachable("getStored should not be called on LoadOp");
+}
+
+bool LoadOp::canUsesBeRemoved(const MemorySlot &slot,
+ const SmallPtrSetImpl<OpOperand *> &blockingUses,
+ SmallVectorImpl<OpOperand *> &newBlockingUses) {
+ if (blockingUses.size() != 1)
+ return false;
+ Value blockingUse = (*blockingUses.begin())->get();
+ // If the blocking use is the slot ptr itself, there will be enough
+ // context to reconstruct the result of the load at removal time, so it can
+ // be removed (provided it loads the exact stored value and is not
+ // volatile).
+ return blockingUse == slot.ptr && getAddr() == slot.ptr &&
+ getResult().getType() == slot.elemType && !getVolatile_();
+}
+
+DeletionKind
+LoadOp::removeBlockingUses(const MemorySlot &slot,
+ const SmallPtrSetImpl<OpOperand *> &blockingUses,
+ RewriterBase &rewriter, Value reachingDefinition) {
+  // `canUsesBeRemoved` has already checked that the only blocking use is the
+  // loaded slot pointer.
+ rewriter.replaceAllUsesWith(getResult(), reachingDefinition);
+ return DeletionKind::Delete;
+}
+
+LogicalResult
+LoadOp::ensureOnlySafeAccesses(const MemorySlot &slot,
+ SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
+ return success(getAddr() != slot.ptr || getType() == slot.elemType);
+}
+
+void LoadOp::getEffects(
+ SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
+ &effects) {
+ effects.emplace_back(MemoryEffects::Read::get(), getAddr());
+  // Volatile operations can have target-specific read-write effects on
+  // memory besides the one referred to by the pointer operand.
+  // Similarly, atomic operations that are monotonic or stricter cause
+  // synchronization that, from a language point of view, amounts to
+  // arbitrary reads and writes to memory.
+ if (getVolatile_() || (getOrdering() != AtomicOrdering::not_atomic &&
+ getOrdering() != AtomicOrdering::unordered)) {
+ effects.emplace_back(MemoryEffects::Write::get());
+ effects.emplace_back(MemoryEffects::Read::get());
+ }
+}
+
+MemorySpace LoadOp::getMemorySpace() {
+ return MemorySpace(getAddr().getType().getMemorySpace());
+}
+
+LogicalResult LoadOp::verify() {
+ MemorySpace ms = getMemorySpace();
+ if (!ms.isLoadableType(getRes().getType()))
+ return emitError("type is not loadable");
+ return ms.verifyCompatibleAtomicOp(
+ getAtomicOpInfo(), {AtomicOrdering::release, AtomicOrdering::acq_rel});
+}
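Concretely, the verifier bans the two orderings LLVM disallows on atomic loads. Assuming `ptr.load` keeps the `llvm.load` assembly format that patch 2 removes from LLVMOps.td, that means:

```mlir
// Valid: monotonic atomic load with explicit alignment.
%0 = ptr.load %ptr atomic monotonic {alignment = 8 : i64} : !ptr.ptr -> i64
// Invalid: release (and acq_rel) orderings are rejected by LoadOp::verify.
// %1 = ptr.load %ptr atomic release {alignment = 8 : i64} : !ptr.ptr -> i64
```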
+
+void LoadOp::build(OpBuilder &builder, OperationState &state, Type type,
+ Value addr, unsigned alignment, bool isVolatile,
+ bool isNonTemporal, AtomicOrdering ordering,
+ StringRef syncscope) {
+ build(builder, state, type, addr,
+ alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
+ isNonTemporal, ordering,
+ syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
+}
+
+//===----------------------------------------------------------------------===//
+// StoreOp
+//===----------------------------------------------------------------------===//
+bool StoreOp::canUsesBeRemoved(const MemorySlot &slot,
+ const SmallPtrSetImpl<OpOperand *> &blockingUses,
+ SmallVectorImpl<OpOperand *> &newBlockingUses) {
+ if (blockingUses.size() != 1)
+ return false;
+ Value blockingUse = (*blockingUses.begin())->get();
+ // If the blocking use is the slot ptr itself, dropping the store is
+ // fine, provided we are currently promoting its target value. Don't allow a
+ // store OF the slot pointer, only INTO the slot pointer.
+ return blockingUse == slot.ptr && getAddr() == slot.ptr &&
+ getValue() != slot.ptr && getValue().getType() == slot.elemType &&
+ !getVolatile_();
+}
+
+DeletionKind
+StoreOp::removeBlockingUses(const MemorySlot &slot,
+ const SmallPtrSetImpl<OpOperand *> &blockingUses,
+ RewriterBase &rewriter, Value reachingDefinition) {
+ return getMemorySpace().removeStoreBlockingUses(
+ *this, getValue(), slot, blockingUses, rewriter, reachingDefinition);
+}
+
+LogicalResult
+StoreOp::ensureOnlySafeAccesses(const MemorySlot &slot,
+ SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
+ return success(getAddr() != slot.ptr ||
+ getValue().getType() == slot.elemType);
+}
+
+bool StoreOp::loadsFrom(const MemorySlot &slot) { return false; }
+
+bool StoreOp::storesTo(const MemorySlot &slot) { return getAddr() == slot.ptr; }
+
+Value StoreOp::getStored(const MemorySlot &slot, RewriterBase &rewriter) {
+ return getValue();
+}
+
+void StoreOp::getEffects(
+ SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
+ &effects) {
+ effects.emplace_back(MemoryEffects::Write::get(), getAddr());
+  // Volatile operations can have target-specific read-write effects on
+  // memory besides the one referred to by the pointer operand.
+  // Similarly, atomic operations that are monotonic or stricter cause
+  // synchronization that, from a language point of view, amounts to
+  // arbitrary reads and writes to memory.
+ if (getVolatile_() || (getOrdering() != AtomicOrdering::not_atomic &&
+ getOrdering() != AtomicOrdering::unordered)) {
+ effects.emplace_back(MemoryEffects::Write::get());
+ effects.emplace_back(MemoryEffects::Read::get());
+ }
+}
+
+MemorySpace StoreOp::getMemorySpace() {
+ return MemorySpace(getAddr().getType().getMemorySpace());
+}
+
+LogicalResult StoreOp::verify() {
+ MemorySpace ms = getMemorySpace();
+ if (!ms.isStorableType(getValue().getType()))
+ return emitError("type is not storable");
+ return ms.verifyCompatibleAtomicOp(
+ getAtomicOpInfo(), {AtomicOrdering::acquire, AtomicOrdering::acq_rel});
+}
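Mirroring the load case, stores reject the acquire-flavored orderings. Again assuming the `llvm.store`-style assembly format:

```mlir
// Valid: monotonic atomic store with explicit alignment.
ptr.store %val, %ptr atomic monotonic {alignment = 8 : i64} : i64, !ptr.ptr
// Invalid: acquire (and acq_rel) orderings are rejected by StoreOp::verify.
// ptr.store %val, %ptr atomic acquire {alignment = 8 : i64} : i64, !ptr.ptr
```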
+
+void StoreOp::build(OpBuilder &builder, OperationState &state, Value value,
+ Value addr, unsigned alignment, bool isVolatile,
+ bool isNonTemporal, AtomicOrdering ordering,
+ StringRef syncscope) {
+ build(builder, state, value, addr,
+ alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
+ isNonTemporal, ordering,
+ syncscope.empty() ? nullptr : builder.getStringAttr(syncscope));
+}
+
+//===----------------------------------------------------------------------===//
+// AddrSpaceCastOp
+//===----------------------------------------------------------------------===//
+LogicalResult AddrSpaceCastOp::verify() {
+ return getMemorySpace().verifyPtrCast(*this, getRes().getType(),
+ getArg().getType());
+}
+
+MemorySpace AddrSpaceCastOp::getMemorySpace() {
+ if (auto ptrTy = getUnderlyingPtrType(getArg().getType()))
+ return MemorySpace(ptrTy.getMemorySpace());
+ return MemorySpace();
+}
+
+static bool forwardToUsers(Operation *op,
+ SmallVectorImpl<OpOperand *> &newBlockingUses) {
+ for (Value result : op->getResults())
+ for (OpOperand &use : result.getUses())
+ newBlockingUses.push_back(&use);
+ return true;
+}
+
+bool AddrSpaceCastOp::canUsesBeRemoved(
+ const SmallPtrSetImpl<OpOperand *> &blockingUses,
+ SmallVectorImpl<OpOperand *> &newBlockingUses) {
+ return forwardToUsers(*this, newBlockingUses);
+}
+
+DeletionKind AddrSpaceCastOp::removeBlockingUses(
+ const SmallPtrSetImpl<OpOperand *> &blockingUses, RewriterBase &rewriter) {
+ return DeletionKind::Delete;
+}
+
+OpFoldResult AddrSpaceCastOp::fold(FoldAdaptor adaptor) {
+ // addrcast(x : T0, T0) -> x
+ if (getArg().getType() == getType())
+ return getArg();
+ // addrcast(addrcast(x : T0, T1), T0) -> x
+ if (auto prev = getArg().getDefiningOp<AddrSpaceCastOp>())
+ if (prev.getArg().getType() == getType())
+ return prev.getArg();
+ return {};
+}
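For example (with the cast syntax assumed as above), the folder collapses a round trip through another address space back to the original SSA value:

```mlir
%1 = ptr.addrspacecast %0 : !ptr.ptr to !ptr.ptr<1>
%2 = ptr.addrspacecast %1 : !ptr.ptr<1> to !ptr.ptr
// Folds so that all uses of %2 become uses of %0; a cast whose source and
// result types match folds to its operand likewise.
```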
+
+//===----------------------------------------------------------------------===//
+// IntToPtrOp
+//===----------------------------------------------------------------------===//
+LogicalResult IntToPtrOp::verify() {
+ return getMemorySpace().verifyIntCastTypes(*this, getArg().getType(),
+ getRes().getType());
+}
+
+MemorySpace IntToPtrOp::getMemorySpace() {
+ if (auto ptrTy = getUnderlyingPtrType(getRes().getType()))
+ return MemorySpace(ptrTy.getMemorySpace());
+ return MemorySpace();
+}
+
+//===----------------------------------------------------------------------===//
+// PtrToIntOp
+//===----------------------------------------------------------------------===//
+LogicalResult PtrToIntOp::verify() {
+ return getMemorySpace().verifyIntCastTypes(*this, getRes().getType(),
+ getArg().getType());
+}
+
+MemorySpace PtrToIntOp::getMemorySpace() {
+ if (auto ptrTy = getUnderlyingPtrType(getArg().getType()))
+ return MemorySpace(ptrTy.getMemorySpace());
+ return MemorySpace();
+}
+
+//===----------------------------------------------------------------------===//
+// Constant Op
+//===----------------------------------------------------------------------===//
+void ConstantOp::build(OpBuilder &odsBuilder, OperationState &odsState,
+ int64_t value, Attribute addressSpace) {
+ build(odsBuilder, odsState, odsBuilder.getType<PtrType>(addressSpace),
+ odsBuilder.getIndexAttr(value));
+}
+
+void ConstantOp::getAsmResultNames(OpAsmSetValueNameFn setNameFn) {
+ SmallString<32> buffer;
+ llvm::raw_svector_ostream name(buffer);
+ name << "ptr" << getValueAttr().getValue();
+ setNameFn(getResult(), name.str());
+}
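This gives constant results readable names when printing. For instance, a constant with value 42 would round-trip roughly as follows (op syntax assumed):

```mlir
%ptr42 = ptr.constant 42
```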
+
+OpFoldResult ConstantOp::fold(FoldAdaptor adaptor) {
+ return adaptor.getValueAttr();
+}
+
+MemorySpace ConstantOp::getMemorySpace() {
+ return MemorySpace(getResult().getType().getMemorySpace());
+}
+
+//===----------------------------------------------------------------------===//
+// TypeOffset Op
+//===----------------------------------------------------------------------===//
+OpFoldResult TypeOffsetOp::fold(FoldAdaptor adaptor) {
+ return adaptor.getBaseTypeAttr();
+}
+
+//===----------------------------------------------------------------------===//
+// PtrAdd Op
+//===----------------------------------------------------------------------===//
+MemorySpace PtrAddOp::getMemorySpace() {
+ return MemorySpace(getResult().getType().getMemorySpace());
+}
+
+#include "mlir/Dialect/Ptr/IR/PtrOpsDialect.cpp.inc"
+
+#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.cpp.inc"
+
+#include "mlir/Dialect/Ptr/IR/MemorySpaceAttrInterfaces.cpp.inc"
+
+#include "mlir/Dialect/Ptr/IR/PtrOpsEnums.cpp.inc"
+
+#define GET_ATTRDEF_CLASSES
+#include "mlir/Dialect/Ptr/IR/PtrOpsAttributes.cpp.inc"
+
+#define GET_TYPEDEF_CLASSES
+#include "mlir/Dialect/Ptr/IR/PtrOpsTypes.cpp.inc"
+
+#define GET_OP_CLASSES
+#include "mlir/Dialect/Ptr/IR/PtrOps.cpp.inc"
diff --git a/mlir/lib/IR/AsmPrinter.cpp b/mlir/lib/IR/AsmPrinter.cpp
index 1f7cbf349255d5..bc6a2aedd0e934 100644
--- a/mlir/lib/IR/AsmPrinter.cpp
+++ b/mlir/lib/IR/AsmPrinter.cpp
@@ -2715,7 +2715,10 @@ void AsmPrinter::Impl::printDialectAttribute(Attribute attr) {
}
void AsmPrinter::Impl::printDialectType(Type type) {
- auto &dialect = type.getDialect();
+ Dialect *dialectPtr = &type.getDialect();
+ if (auto ifce = dyn_cast<SharedDialectTypeInterface>(type))
+ dialectPtr = &ifce.getSharedDialect();
+ auto &dialect = *dialectPtr;
// Ask the dialect to serialize the type to a string.
std::string typeName;
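The intent, per the `SharedDialectTypeInterface` introduced by this patch, is that a `ptr` type whose memory space is owned by another dialect prints through that dialect. For example, with an `LLVM::AddressSpaceAttr` memory space, the type could keep printing in LLVM syntax (a sketch; the exact spelling is assumed):

```mlir
// Printed via the LLVM dialect even though the type is a ptr::PtrType.
llvm.func @f(%arg0: !llvm.ptr<1>)
```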
diff --git a/mlir/lib/Target/LLVMIR/CMakeLists.txt b/mlir/lib/Target/LLVMIR/CMakeLists.txt
index 94280a2ec9012b..1de557c1ea49bb 100644
--- a/mlir/lib/Target/LLVMIR/CMakeLists.txt
+++ b/mlir/lib/Target/LLVMIR/CMakeLists.txt
@@ -37,6 +37,7 @@ add_mlir_translation_library(MLIRTargetLLVMIRExport
LINK_LIBS PUBLIC
MLIRDLTIDialect
MLIRLLVMDialect
+ MLIRPtrDialect
MLIRLLVMIRTransforms
MLIRTranslateLib
MLIRTransformUtils
@@ -57,6 +58,7 @@ add_mlir_translation_library(MLIRToLLVMIRTranslationRegistration
MLIRNVVMToLLVMIRTranslation
MLIROpenACCToLLVMIRTranslation
MLIROpenMPToLLVMIRTranslation
+ MLIRPtrToLLVMIRTranslation
MLIRROCDLToLLVMIRTranslation
MLIRSPIRVToLLVMIRTranslation
)
diff --git a/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt b/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
index c9d916d8a5d82d..bcbcdf14278341 100644
--- a/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
+++ b/mlir/lib/Target/LLVMIR/Dialect/CMakeLists.txt
@@ -8,6 +8,7 @@ add_subdirectory(LLVMIR)
add_subdirectory(NVVM)
add_subdirectory(OpenACC)
add_subdirectory(OpenMP)
+add_subdirectory(Ptr)
add_subdirectory(ROCDL)
add_subdirectory(SPIRV)
add_subdirectory(X86Vector)
diff --git a/mlir/lib/Target/LLVMIR/Dialect/Ptr/CMakeLists.txt b/mlir/lib/Target/LLVMIR/Dialect/Ptr/CMakeLists.txt
new file mode 100644
index 00000000000000..c40b47bbdff339
--- /dev/null
+++ b/mlir/lib/Target/LLVMIR/Dialect/Ptr/CMakeLists.txt
@@ -0,0 +1,12 @@
+add_mlir_translation_library(MLIRPtrToLLVMIRTranslation
+ PtrToLLVMIRTranslation.cpp
+
+ LINK_COMPONENTS
+ Core
+
+ LINK_LIBS PUBLIC
+ MLIRIR
+ MLIRPtrDialect
+ MLIRSupport
+ MLIRTargetLLVMIRExport
+ )
\ No newline at end of file
diff --git a/mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp b/mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp
new file mode 100644
index 00000000000000..97682666adc4c7
--- /dev/null
+++ b/mlir/lib/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.cpp
@@ -0,0 +1,338 @@
+//===- PtrToLLVMIRTranslation.cpp - Translate Ptr dialect to LLVM IR ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a translation between the MLIR Ptr dialect and LLVM IR.
+//
+//===----------------------------------------------------------------------===//
+#include "mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h"
+#include "mlir/Dialect/LLVMIR/LLVMInterfaces.h"
+#include "mlir/Dialect/Ptr/IR/PtrOps.h"
+#include "mlir/Target/LLVMIR/LLVMTranslationInterface.h"
+#include "mlir/Target/LLVMIR/ModuleTranslation.h"
+#include "llvm/ADT/TypeSwitch.h"
+
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/IR/MDBuilder.h"
+
+using namespace mlir;
+using namespace mlir::ptr;
+
+namespace {
+::llvm::AtomicRMWInst::BinOp convertAtomicBinOpToLLVM(AtomicBinOp value) {
+ switch (value) {
+ case AtomicBinOp::xchg:
+ return ::llvm::AtomicRMWInst::BinOp::Xchg;
+ case AtomicBinOp::add:
+ return ::llvm::AtomicRMWInst::BinOp::Add;
+ case AtomicBinOp::sub:
+ return ::llvm::AtomicRMWInst::BinOp::Sub;
+ case AtomicBinOp::_and:
+ return ::llvm::AtomicRMWInst::BinOp::And;
+ case AtomicBinOp::nand:
+ return ::llvm::AtomicRMWInst::BinOp::Nand;
+ case AtomicBinOp::_or:
+ return ::llvm::AtomicRMWInst::BinOp::Or;
+ case AtomicBinOp::_xor:
+ return ::llvm::AtomicRMWInst::BinOp::Xor;
+ case AtomicBinOp::max:
+ return ::llvm::AtomicRMWInst::BinOp::Max;
+ case AtomicBinOp::min:
+ return ::llvm::AtomicRMWInst::BinOp::Min;
+ case AtomicBinOp::umax:
+ return ::llvm::AtomicRMWInst::BinOp::UMax;
+ case AtomicBinOp::umin:
+ return ::llvm::AtomicRMWInst::BinOp::UMin;
+ case AtomicBinOp::fadd:
+ return ::llvm::AtomicRMWInst::BinOp::FAdd;
+ case AtomicBinOp::fsub:
+ return ::llvm::AtomicRMWInst::BinOp::FSub;
+ case AtomicBinOp::fmax:
+ return ::llvm::AtomicRMWInst::BinOp::FMax;
+ case AtomicBinOp::fmin:
+ return ::llvm::AtomicRMWInst::BinOp::FMin;
+ case AtomicBinOp::uinc_wrap:
+ return ::llvm::AtomicRMWInst::BinOp::UIncWrap;
+ case AtomicBinOp::udec_wrap:
+ return ::llvm::AtomicRMWInst::BinOp::UDecWrap;
+ }
+ llvm_unreachable("unknown AtomicBinOp type");
+}
+
+::llvm::AtomicOrdering convertAtomicOrderingToLLVM(AtomicOrdering value) {
+ switch (value) {
+ case AtomicOrdering::not_atomic:
+ return ::llvm::AtomicOrdering::NotAtomic;
+ case AtomicOrdering::unordered:
+ return ::llvm::AtomicOrdering::Unordered;
+ case AtomicOrdering::monotonic:
+ return ::llvm::AtomicOrdering::Monotonic;
+ case AtomicOrdering::acquire:
+ return ::llvm::AtomicOrdering::Acquire;
+ case AtomicOrdering::release:
+ return ::llvm::AtomicOrdering::Release;
+ case AtomicOrdering::acq_rel:
+ return ::llvm::AtomicOrdering::AcquireRelease;
+ case AtomicOrdering::seq_cst:
+ return ::llvm::AtomicOrdering::SequentiallyConsistent;
+ }
+ llvm_unreachable("unknown AtomicOrdering type");
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicRMWOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertAtomicRMWOp(AtomicRMWOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ auto *inst = builder.CreateAtomicRMW(
+ convertAtomicBinOpToLLVM(op.getBinOp()),
+ moduleTranslation.lookupValue(op.getPtr()),
+ moduleTranslation.lookupValue(op.getVal()), llvm::MaybeAlign(),
+ convertAtomicOrderingToLLVM(op.getOrdering()));
+ moduleTranslation.mapValue(op.getRes()) = inst;
+ inst->setVolatile(op.getVolatile_());
+ if (op.getSyncscope().has_value()) {
+ llvm::LLVMContext &llvmContext = builder.getContext();
+ inst->setSyncScopeID(
+ llvmContext.getOrInsertSyncScopeID(*op.getSyncscope()));
+ }
+ if (op.getAlignment().has_value()) {
+ auto align = *op.getAlignment();
+ if (align != 0)
+ inst->setAlignment(llvm::Align(align));
+ }
+ if (auto accessGroup =
+ dyn_cast<LLVM::AccessGroupOpInterface>(op.getOperation()))
+ moduleTranslation.setAccessGroupsMetadata(accessGroup, inst);
+ if (auto aa = dyn_cast<LLVM::AliasAnalysisOpInterface>(op.getOperation())) {
+ moduleTranslation.setAliasScopeMetadata(aa, inst);
+ moduleTranslation.setTBAAMetadata(aa, inst);
+ }
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// AtomicCmpXchgOp
+//===----------------------------------------------------------------------===//
+LogicalResult
+convertAtomicCmpXchgOp(AtomicCmpXchgOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ auto *inst = builder.CreateAtomicCmpXchg(
+ moduleTranslation.lookupValue(op.getPtr()),
+ moduleTranslation.lookupValue(op.getCmp()),
+ moduleTranslation.lookupValue(op.getVal()), llvm::MaybeAlign(),
+ convertAtomicOrderingToLLVM(op.getSuccessOrdering()),
+ convertAtomicOrderingToLLVM(op.getFailureOrdering()));
+ moduleTranslation.mapValue(op.getRes()) = inst;
+ inst->setWeak(op.getWeak());
+ inst->setVolatile(op.getVolatile_());
+ if (op.getSyncscope().has_value()) {
+ llvm::LLVMContext &llvmContext = builder.getContext();
+ inst->setSyncScopeID(
+ llvmContext.getOrInsertSyncScopeID(*op.getSyncscope()));
+ }
+ if (op.getAlignment().has_value()) {
+ auto align = *op.getAlignment();
+ if (align != 0)
+ inst->setAlignment(llvm::Align(align));
+ }
+ if (auto accessGroup =
+ dyn_cast<LLVM::AccessGroupOpInterface>(op.getOperation()))
+ moduleTranslation.setAccessGroupsMetadata(accessGroup, inst);
+ if (auto aa = dyn_cast<LLVM::AliasAnalysisOpInterface>(op.getOperation())) {
+ moduleTranslation.setAliasScopeMetadata(aa, inst);
+ moduleTranslation.setTBAAMetadata(aa, inst);
+ }
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// LoadOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertLoadOp(LoadOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ auto *inst = builder.CreateLoad(
+ moduleTranslation.convertType(op.getResult().getType()),
+ moduleTranslation.lookupValue(op.getAddr()), op.getVolatile_());
+ moduleTranslation.mapValue(op.getRes()) = inst;
+ inst->setAtomic(convertAtomicOrderingToLLVM(op.getOrdering()));
+ if (op.getSyncscope().has_value()) {
+ llvm::LLVMContext &llvmContext = builder.getContext();
+ inst->setSyncScopeID(
+ llvmContext.getOrInsertSyncScopeID(*op.getSyncscope()));
+ }
+ if (op.getAlignment().has_value()) {
+ auto align = *op.getAlignment();
+ if (align != 0)
+ inst->setAlignment(llvm::Align(align));
+ }
+ if (op.getNontemporal()) {
+ llvm::MDNode *metadata = llvm::MDNode::get(
+ inst->getContext(), llvm::ConstantAsMetadata::get(builder.getInt32(1)));
+ inst->setMetadata(llvm::LLVMContext::MD_nontemporal, metadata);
+ }
+ if (auto accessGroup =
+ dyn_cast<LLVM::AccessGroupOpInterface>(op.getOperation()))
+ moduleTranslation.setAccessGroupsMetadata(accessGroup, inst);
+ if (auto aa = dyn_cast<LLVM::AliasAnalysisOpInterface>(op.getOperation())) {
+ moduleTranslation.setAliasScopeMetadata(aa, inst);
+ moduleTranslation.setTBAAMetadata(aa, inst);
+ }
+ return success();
+}
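Putting the pieces together, a nontemporal atomic load should lower roughly as follows; the MLIR syntax is assumed, and the LLVM IR shows the expected shape rather than verified output:

```mlir
%0 = ptr.load %p atomic monotonic {alignment = 8 : i64, nontemporal}
    : !ptr.ptr -> i64
// Expected LLVM IR:
//   %0 = load atomic i64, ptr %p monotonic, align 8, !nontemporal !0
```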
+
+//===----------------------------------------------------------------------===//
+// StoreOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertStoreOp(StoreOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ auto *inst = builder.CreateStore(moduleTranslation.lookupValue(op.getValue()),
+ moduleTranslation.lookupValue(op.getAddr()),
+ op.getVolatile_());
+ inst->setAtomic(convertAtomicOrderingToLLVM(op.getOrdering()));
+ if (op.getSyncscope().has_value()) {
+ llvm::LLVMContext &llvmContext = builder.getContext();
+ inst->setSyncScopeID(
+ llvmContext.getOrInsertSyncScopeID(*op.getSyncscope()));
+ }
+ if (op.getAlignment().has_value()) {
+ auto align = *op.getAlignment();
+ if (align != 0)
+ inst->setAlignment(llvm::Align(align));
+ }
+ if (op.getNontemporal()) {
+ llvm::MDNode *metadata = llvm::MDNode::get(
+ inst->getContext(), llvm::ConstantAsMetadata::get(builder.getInt32(1)));
+ inst->setMetadata(llvm::LLVMContext::MD_nontemporal, metadata);
+ }
+ if (auto accessGroup =
+ dyn_cast<LLVM::AccessGroupOpInterface>(op.getOperation()))
+ moduleTranslation.setAccessGroupsMetadata(accessGroup, inst);
+ if (auto aa = dyn_cast<LLVM::AliasAnalysisOpInterface>(op.getOperation())) {
+ moduleTranslation.setAliasScopeMetadata(aa, inst);
+ moduleTranslation.setTBAAMetadata(aa, inst);
+ }
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// AddrSpaceCastOp
+//===----------------------------------------------------------------------===//
+LogicalResult
+convertAddrSpaceCastOp(AddrSpaceCastOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ moduleTranslation.mapValue(op.getRes()) = builder.CreateAddrSpaceCast(
+ moduleTranslation.lookupValue(op.getArg()),
+ moduleTranslation.convertType(op.getResult().getType()));
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// IntToPtrOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertIntToPtrOp(IntToPtrOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ moduleTranslation.mapValue(op.getRes()) = builder.CreateIntToPtr(
+ moduleTranslation.lookupValue(op.getArg()),
+ moduleTranslation.convertType(op.getResult().getType()));
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// PtrToIntOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertPtrToIntOp(PtrToIntOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ moduleTranslation.mapValue(op.getRes()) = builder.CreatePtrToInt(
+ moduleTranslation.lookupValue(op.getArg()),
+ moduleTranslation.convertType(op.getResult().getType()));
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// ConstantOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertConstantOp(ConstantOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// TypeOffsetOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertTypeOffsetOp(TypeOffsetOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ return success();
+}
+
+//===----------------------------------------------------------------------===//
+// PtrAddOp
+//===----------------------------------------------------------------------===//
+LogicalResult convertPtrAddOp(PtrAddOp op, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) {
+ return success();
+}
+
+class PtrDialectLLVMIRTranslationInterface
+ : public LLVMTranslationDialectInterface {
+public:
+ using LLVMTranslationDialectInterface::LLVMTranslationDialectInterface;
+
+ LogicalResult
+ convertOperation(Operation *operation, llvm::IRBuilderBase &builder,
+ LLVM::ModuleTranslation &moduleTranslation) const override {
+ return llvm::TypeSwitch<Operation *, LogicalResult>(operation)
+ .Case([&](ptr::AtomicRMWOp op) {
+ return convertAtomicRMWOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::AtomicCmpXchgOp op) {
+ return convertAtomicCmpXchgOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::LoadOp op) {
+ return convertLoadOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::StoreOp op) {
+ return convertStoreOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::AddrSpaceCastOp op) {
+ return convertAddrSpaceCastOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::IntToPtrOp op) {
+ return convertIntToPtrOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::PtrToIntOp op) {
+ return convertPtrToIntOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::ConstantOp op) {
+ return convertConstantOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::TypeOffsetOp op) {
+ return convertTypeOffsetOp(op, builder, moduleTranslation);
+ })
+ .Case([&](ptr::PtrAddOp op) {
+ return convertPtrAddOp(op, builder, moduleTranslation);
+ })
+ .Default([&](Operation *op) {
+ return op->emitError("unsupported Ptr operation: ") << op->getName();
+ });
+ }
+};
+
+} // namespace
+
+void mlir::registerPtrDialectTranslation(DialectRegistry &registry) {
+ registry.insert<ptr::PtrDialect>();
+ registry.addExtension(+[](MLIRContext *ctx, ptr::PtrDialect *dialect) {
+ dialect->addInterfaces<PtrDialectLLVMIRTranslationInterface>();
+ });
+}
+
+void mlir::registerPtrDialectTranslation(MLIRContext &context) {
+ DialectRegistry registry;
+ registerPtrDialectTranslation(registry);
+ context.appendDialectRegistry(registry);
+}
diff --git a/mlir/lib/Target/LLVMIR/TypeToLLVM.cpp b/mlir/lib/Target/LLVMIR/TypeToLLVM.cpp
index 6d8b415ff09dce..c040ff2f0369a2 100644
--- a/mlir/lib/Target/LLVMIR/TypeToLLVM.cpp
+++ b/mlir/lib/Target/LLVMIR/TypeToLLVM.cpp
@@ -8,6 +8,7 @@
#include "mlir/Target/LLVMIR/TypeToLLVM.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/Ptr/IR/PtrTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"
@@ -71,7 +72,7 @@ class TypeToLLVMIRTranslatorImpl {
return llvm::Type::getMetadataTy(context);
})
.Case<LLVM::LLVMArrayType, IntegerType, LLVM::LLVMFunctionType,
- LLVM::LLVMPointerType, LLVM::LLVMStructType,
+ LLVM::LLVMPointerType, ptr::PtrType, LLVM::LLVMStructType,
LLVM::LLVMFixedVectorType, LLVM::LLVMScalableVectorType,
VectorType, LLVM::LLVMTargetExtType>(
[this](auto type) { return this->translate(type); })
@@ -109,6 +110,11 @@ class TypeToLLVMIRTranslatorImpl {
return llvm::PointerType::get(context, type.getAddressSpace());
}
+ /// Translates the given pointer type.
+ llvm::Type *translate(ptr::PtrType type) {
+ return llvm::PointerType::get(context, type.getAddressSpace());
+ }
+
/// Translates the given structure type, supports both identified and literal
/// structs. This will _create_ a new identified structure every time, use
/// `convertType` if a structure with the same name must be looked up instead.
From 0a6e948ecb9031142d0e4345828d28dcad219b9a Mon Sep 17 00:00:00 2001
From: Fabian Mora <fmora.dev at gmail.com>
Date: Wed, 3 Jan 2024 14:23:07 +0000
Subject: [PATCH 2/3] Switch LLVM pointer ops to Ptr dialect Ops
---
.../mlir/Dialect/LLVMIR/CMakeLists.txt | 2 +
.../mlir/Dialect/LLVMIR/LLVMAttrDefs.td | 23 ++
mlir/include/mlir/Dialect/LLVMIR/LLVMAttrs.h | 1 +
.../include/mlir/Dialect/LLVMIR/LLVMDialect.h | 10 +-
.../mlir/Dialect/LLVMIR/LLVMDialect.td | 3 +
mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td | 36 ---
.../mlir/Dialect/LLVMIR/LLVMInterfaces.td | 46 ++--
.../include/mlir/Dialect/LLVMIR/LLVMOpBase.td | 4 +-
mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td | 205 -----------------
mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h | 39 +++-
mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td | 33 ---
.../Conversion/AsyncToLLVM/AsyncToLLVM.cpp | 1 +
.../ConvertToLLVM/ConvertToLLVMPass.cpp | 1 +
.../GPUCommon/GPUToLLVMConversion.cpp | 1 +
.../LLVMCommon/ConversionTarget.cpp | 1 +
.../Conversion/MemRefToLLVM/MemRefToLLVM.cpp | 2 +-
.../Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp | 1 +
.../Conversion/SCFToOpenMP/SCFToOpenMP.cpp | 4 +-
.../ConvertLaunchFuncToLLVMCalls.cpp | 1 +
.../SPIRVToLLVM/SPIRVToLLVMPass.cpp | 1 +
mlir/lib/Dialect/LLVMIR/CMakeLists.txt | 1 +
mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp | 209 ++++++++++++++++++
mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp | 190 +++++-----------
mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp | 12 -
mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp | 90 +-------
mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp | 181 ++++-----------
mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp | 5 +-
mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp | 5 +-
mlir/lib/ExecutionEngine/CMakeLists.txt | 2 +
mlir/lib/Target/LLVMIR/ModuleTranslation.cpp | 4 +-
.../AsyncToLLVM/convert-runtime-to-llvm.mlir | 6 +-
.../AsyncToLLVM/convert-to-llvm.mlir | 6 +-
.../FuncToLLVM/calling-convention.mlir | 12 +-
.../lower-alloc-to-gpu-runtime-calls.mlir | 4 +-
.../lower-memcpy-to-gpu-runtime-calls.mlir | 6 +-
.../GPUCommon/lower-memory-space-attrs.mlir | 14 +-
.../lower-memset-to-gpu-runtime-calls.mlir | 2 +-
.../GPUCommon/memory-attrbution.mlir | 8 +-
.../Conversion/GPUCommon/transfer_write.mlir | 2 +-
.../Conversion/GPUToNVVM/gpu-to-nvvm.mlir | 6 +-
.../Conversion/GPUToVulkan/invoke-vulkan.mlir | 4 +-
.../convert-dynamic-memref-ops.mlir | 60 ++---
.../convert-static-memref-ops.mlir | 38 ++--
.../expand-then-convert-to-llvm.mlir | 4 +-
.../MemRefToLLVM/memref-to-llvm.mlir | 60 ++---
.../Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir | 12 +-
.../OpenMPToLLVM/convert-to-llvmir.mlir | 28 +--
.../Conversion/SCFToOpenMP/reductions.mlir | 20 +-
.../SPIRVToLLVM/memory-ops-to-llvm.mlir | 24 +-
.../VectorToLLVM/vector-scalable-memcpy.mlir | 4 +-
.../VectorToLLVM/vector-to-llvm.mlir | 10 +-
mlir/test/Dialect/LLVMIR/canonicalize.mlir | 20 +-
.../Dialect/LLVMIR/inlining-alias-scopes.mlir | 122 +++++-----
mlir/test/Dialect/LLVMIR/inlining.mlir | 42 ++--
mlir/test/Dialect/LLVMIR/invalid.mlir | 49 ++--
mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir | 16 +-
.../Dialect/LLVMIR/mem2reg-intrinsics.mlir | 44 ++--
mlir/test/Dialect/LLVMIR/mem2reg.mlir | 172 +++++++-------
mlir/test/Dialect/LLVMIR/opaque-ptr.mlir | 12 +-
mlir/test/Dialect/LLVMIR/roundtrip.mlir | 44 ++--
mlir/test/Dialect/LLVMIR/sroa-intrinsics.mlir | 44 ++--
mlir/test/Dialect/LLVMIR/sroa-statistics.mlir | 10 +-
mlir/test/Dialect/LLVMIR/sroa.mlir | 46 ++--
mlir/test/Dialect/LLVMIR/tbaa-roundtrip.mlir | 24 +-
.../test/Dialect/LLVMIR/type-consistency.mlir | 194 ++++++++--------
mlir/test/Dialect/MemRef/transform-ops.mlir | 2 +-
mlir/test/Dialect/OpenACC/invalid.mlir | 10 +-
mlir/test/Dialect/OpenACC/ops.mlir | 4 +-
mlir/test/Dialect/OpenMP/canonicalize.mlir | 2 +-
mlir/test/Dialect/OpenMP/invalid.mlir | 8 +-
mlir/test/Dialect/OpenMP/ops.mlir | 18 +-
mlir/test/Target/LLVMIR/arm-sve.mlir | 4 +-
.../Target/LLVMIR/attribute-alias-scopes.mlir | 6 +-
mlir/test/Target/LLVMIR/attribute-tbaa.mlir | 10 +-
mlir/test/Target/LLVMIR/llvmir.mlir | 86 +++----
mlir/test/Target/LLVMIR/loop-metadata.mlir | 6 +-
.../omptarget-array-sectioning-host.mlir | 4 +-
...target-byref-bycopy-generation-device.mlir | 4 +-
...mptarget-byref-bycopy-generation-host.mlir | 4 +-
...arget-constant-indexing-device-region.mlir | 4 +-
.../omptarget-declare-target-llvm-device.mlir | 2 +-
.../omptarget-declare-target-llvm-host.mlir | 2 +-
mlir/test/Target/LLVMIR/omptarget-llvm.mlir | 44 ++--
.../LLVMIR/omptarget-parallel-llvm.mlir | 8 +-
.../LLVMIR/omptarget-parallel-wsloop.mlir | 2 +-
.../LLVMIR/omptarget-region-device-llvm.mlir | 10 +-
.../omptarget-region-llvm-target-device.mlir | 4 +-
.../Target/LLVMIR/omptarget-region-llvm.mlir | 10 +-
.../omptarget-region-parallel-llvm.mlir | 10 +-
mlir/test/Target/LLVMIR/omptarget-wsloop.mlir | 2 +-
mlir/test/Target/LLVMIR/opaque-ptr.mlir | 6 +-
mlir/test/Target/LLVMIR/openacc-llvm.mlir | 2 +-
.../Target/LLVMIR/openmp-llvm-invalid.mlir | 6 +-
mlir/test/Target/LLVMIR/openmp-llvm.mlir | 102 ++++-----
mlir/test/Target/LLVMIR/openmp-nested.mlir | 4 +-
mlir/test/Target/LLVMIR/openmp-reduction.mlir | 28 +--
mlir/test/Target/LLVMIR/openmp-teams.mlir | 2 +-
mlir/test/Target/LLVMIR/target-ext-type.mlir | 2 +-
mlir/test/mlir-cpu-runner/simple.mlir | 4 +-
mlir/test/mlir-cpu-runner/x86-varargs.mlir | 12 +-
mlir/unittests/ExecutionEngine/Invoke.cpp | 7 +
101 files changed, 1240 insertions(+), 1484 deletions(-)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt b/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt
index 8e41fcc05a161e..5e7d14301e6f57 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt
+++ b/mlir/include/mlir/Dialect/LLVMIR/CMakeLists.txt
@@ -13,6 +13,8 @@ mlir_tablegen(LLVMOpsAttrDefs.cpp.inc -gen-attrdef-defs
-attrdefs-dialect=llvm)
add_public_tablegen_target(MLIRLLVMOpsIncGen)
+add_dependencies(MLIRLLVMOpsIncGen MLIRPtrOpsIncGen)
+
set(LLVM_TARGET_DEFINITIONS LLVMTypes.td)
mlir_tablegen(LLVMTypes.h.inc -gen-typedef-decls -typedefs-dialect=llvm)
mlir_tablegen(LLVMTypes.cpp.inc -gen-typedef-defs -typedefs-dialect=llvm)
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
index f36ec0d02cf70d..da0e68fb5b5fde 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrDefs.td
@@ -10,6 +10,7 @@
#define LLVMIR_ATTRDEFS
include "mlir/Dialect/LLVMIR/LLVMDialect.td"
+include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.td"
include "mlir/IR/AttrTypeBase.td"
include "mlir/IR/CommonAttrConstraints.td"
@@ -997,4 +998,26 @@ def LLVM_TargetFeaturesAttr : LLVM_Attr<"TargetFeatures", "target_features">
let genVerifyDecl = 1;
}
+//===----------------------------------------------------------------------===//
+// AddressSpaceAttr
+//===----------------------------------------------------------------------===//
+
+def AddressSpaceAttr : LLVM_Attr<"AddressSpace", "address_space", [
+ DeclareAttrInterfaceMethods<MemorySpaceAttrInterface, [
+ "getModelOwner",
+ "getAddressSpace",
+ "getDefaultMemorySpace",
+ "isLoadableType",
+ "isStorableType",
+ "verifyCompatibleAtomicOp",
+ "verifyAtomicRMW",
+ "verifyAtomicAtomicCmpXchg",
+ "verifyPtrCast",
+ "verifyIntCastTypes",
+ "removeStoreBlockingUses"]>
+ ]> {
+ let parameters = (ins DefaultValuedParameter<"unsigned", "0">:$as);
+ let assemblyFormat = "(`<` $as^ `>`)?";
+}
+
#endif // LLVMIR_ATTRDEFS
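With the optional `<$as>` parameter shown above, address space 0 prints with no parameter list. A sketch of the attribute used as a `ptr` memory space (the type composition syntax is assumed):

```mlir
// Generic (address space 0) and GPU shared-memory (address space 3) pointers.
!generic = !ptr.ptr<#llvm.address_space>
!shared  = !ptr.ptr<#llvm.address_space<3>>
```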
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrs.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrs.h
index c370bfa2b733d6..bbc5898266c498 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrs.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMAttrs.h
@@ -15,6 +15,7 @@
#define MLIR_DIALECT_LLVMIR_LLVMATTRS_H_
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/Ptr/IR/MemorySpaceInterfaces.h"
#include "mlir/IR/OpImplementation.h"
#include <optional>
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
index 06df4a601b7a3f..fe67e96ebe2736 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.h
@@ -18,6 +18,7 @@
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/Dialect/LLVMIR/LLVMInterfaces.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/Ptr/IR/PtrOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/Dialect.h"
#include "mlir/IR/OpDefinition.h"
@@ -84,6 +85,14 @@ class GEPArg : public PointerUnion<Value, GEPConstantIndex> {
using BaseT::operator=;
};
+
+using AtomicBinOp = ::mlir::ptr::AtomicBinOp;
+using AtomicRMWOp = ::mlir::ptr::AtomicRMWOp;
+using LoadOp = ::mlir::ptr::LoadOp;
+using StoreOp = ::mlir::ptr::StoreOp;
+using AddrSpaceCastOp = ::mlir::ptr::AddrSpaceCastOp;
+using IntToPtrOp = ::mlir::ptr::IntToPtrOp;
+using PtrToIntOp = ::mlir::ptr::PtrToIntOp;
} // namespace LLVM
} // namespace mlir
@@ -232,7 +241,6 @@ template <typename IntT = int64_t>
SmallVector<IntT> convertArrayToIndices(ArrayAttr attrs) {
return convertArrayToIndices<IntT>(attrs.getValue());
}
-
} // namespace LLVM
} // namespace mlir
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
index c4c011f30b3bcd..a706f5f115bcea 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMDialect.td
@@ -96,6 +96,9 @@ def LLVM_Dialect : Dialect {
/// Register the attributes of this dialect.
void registerAttributes();
}];
+ let dependentDialects = [
+ "ptr::PtrDialect"
+ ];
}
#endif // LLVMIR_DIALECT
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
index a7b269eb41ee2e..f21d74af8e4d82 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMEnums.td
@@ -87,42 +87,6 @@ def AsmATTOrIntel : LLVM_EnumAttr<
// Atomic Operations
//===----------------------------------------------------------------------===//
-def AtomicBinOpXchg : LLVM_EnumAttrCase<"xchg", "xchg", "Xchg", 0>;
-def AtomicBinOpAdd : LLVM_EnumAttrCase<"add", "add", "Add", 1>;
-def AtomicBinOpSub : LLVM_EnumAttrCase<"sub", "sub", "Sub", 2>;
-def AtomicBinOpAnd : LLVM_EnumAttrCase<"_and", "_and", "And", 3>;
-def AtomicBinOpNand : LLVM_EnumAttrCase<"nand", "nand", "Nand", 4>;
-def AtomicBinOpOr : LLVM_EnumAttrCase<"_or", "_or", "Or", 5>;
-def AtomicBinOpXor : LLVM_EnumAttrCase<"_xor", "_xor", "Xor", 6>;
-def AtomicBinOpMax : LLVM_EnumAttrCase<"max", "max", "Max", 7>;
-def AtomicBinOpMin : LLVM_EnumAttrCase<"min", "min", "Min", 8>;
-def AtomicBinOpUMax : LLVM_EnumAttrCase<"umax", "umax", "UMax", 9>;
-def AtomicBinOpUMin : LLVM_EnumAttrCase<"umin", "umin", "UMin", 10>;
-def AtomicBinOpFAdd : LLVM_EnumAttrCase<"fadd", "fadd", "FAdd", 11>;
-def AtomicBinOpFSub : LLVM_EnumAttrCase<"fsub", "fsub", "FSub", 12>;
-def AtomicBinOpFMax : LLVM_EnumAttrCase<"fmax", "fmax", "FMax", 13>;
-def AtomicBinOpFMin : LLVM_EnumAttrCase<"fmin", "fmin", "FMin", 14>;
-def AtomicBinOpUIncWrap : LLVM_EnumAttrCase<"uinc_wrap",
- "uinc_wrap", "UIncWrap", 15>;
-def AtomicBinOpUDecWrap : LLVM_EnumAttrCase<"udec_wrap",
- "udec_wrap", "UDecWrap", 16>;
-
-// A sentinel value that has no MLIR counterpart.
-def AtomicBadBinOp : LLVM_EnumAttrCase<"", "", "BAD_BINOP", 0>;
-
-def AtomicBinOp : LLVM_EnumAttr<
- "AtomicBinOp",
- "::llvm::AtomicRMWInst::BinOp",
- "llvm.atomicrmw binary operations",
- [AtomicBinOpXchg, AtomicBinOpAdd, AtomicBinOpSub, AtomicBinOpAnd,
- AtomicBinOpNand, AtomicBinOpOr, AtomicBinOpXor, AtomicBinOpMax,
- AtomicBinOpMin, AtomicBinOpUMax, AtomicBinOpUMin, AtomicBinOpFAdd,
- AtomicBinOpFSub, AtomicBinOpFMax, AtomicBinOpFMin, AtomicBinOpUIncWrap,
- AtomicBinOpUDecWrap],
- [AtomicBadBinOp]> {
- let cppNamespace = "::mlir::LLVM";
-}
-
def AtomicOrderingNotAtomic : LLVM_EnumAttrCase<"not_atomic",
"not_atomic", "NotAtomic", 0>;
def AtomicOrderingUnordered : LLVM_EnumAttrCase<"unordered",
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td
index 81589eaf5fd0a4..e0f7a5c96cb739 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMInterfaces.td
@@ -160,8 +160,8 @@ def AccessGroupOpInterface : OpInterface<"AccessGroupOpInterface"> {
/*args=*/ (ins),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- return op.getAccessGroupsAttr();
+ return mlir::dyn_cast_or_null<ArrayAttr>(
+ $_op->getAttr("access_groups"));
}]
>,
InterfaceMethod<
@@ -171,8 +171,10 @@ def AccessGroupOpInterface : OpInterface<"AccessGroupOpInterface"> {
/*args=*/ (ins "const ArrayAttr":$attr),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- op.setAccessGroupsAttr(attr);
+ if (attr)
+ $_op->setAttr("access_groups", attr);
+ else
+ $_op->removeAttr("access_groups");
}]
>
];
@@ -198,8 +200,8 @@ def AliasAnalysisOpInterface : OpInterface<"AliasAnalysisOpInterface"> {
/*args=*/ (ins),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- return op.getAliasScopesAttr();
+ return mlir::dyn_cast_or_null<ArrayAttr>(
+ $_op->getAttr("alias_scopes"));
}]
>,
InterfaceMethod<
@@ -209,8 +211,10 @@ def AliasAnalysisOpInterface : OpInterface<"AliasAnalysisOpInterface"> {
/*args=*/ (ins "const ArrayAttr":$attr),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- op.setAliasScopesAttr(attr);
+ if (attr)
+ $_op->setAttr("alias_scopes", attr);
+ else
+ $_op->removeAttr("alias_scopes");
}]
>,
InterfaceMethod<
@@ -220,8 +224,8 @@ def AliasAnalysisOpInterface : OpInterface<"AliasAnalysisOpInterface"> {
/*args=*/ (ins),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- return op.getNoaliasScopesAttr();
+ return mlir::dyn_cast_or_null<ArrayAttr>(
+ $_op->getAttr("noalias_scopes"));
}]
>,
InterfaceMethod<
@@ -231,8 +235,10 @@ def AliasAnalysisOpInterface : OpInterface<"AliasAnalysisOpInterface"> {
/*args=*/ (ins "const ArrayAttr":$attr),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- op.setNoaliasScopesAttr(attr);
+ if (attr)
+ $_op->setAttr("noalias_scopes", attr);
+ else
+ $_op->removeAttr("noalias_scopes");
}]
>,
InterfaceMethod<
@@ -242,8 +248,8 @@ def AliasAnalysisOpInterface : OpInterface<"AliasAnalysisOpInterface"> {
/*args=*/ (ins),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- return op.getTbaaAttr();
+ return mlir::dyn_cast_or_null<ArrayAttr>(
+ $_op->getAttr("tbaa"));
}]
>,
InterfaceMethod<
@@ -253,8 +259,10 @@ def AliasAnalysisOpInterface : OpInterface<"AliasAnalysisOpInterface"> {
/*args=*/ (ins "const ArrayAttr":$attr),
/*methodBody=*/ [{}],
/*defaultImpl=*/ [{
- auto op = cast<ConcreteOp>(this->getOperation());
- op.setTbaaAttr(attr);
+ if (attr)
+ $_op->setAttr("tbaa", attr);
+ else
+ $_op->removeAttr("tbaa");
}]
>,
InterfaceMethod<
@@ -262,7 +270,11 @@ def AliasAnalysisOpInterface : OpInterface<"AliasAnalysisOpInterface"> {
"operation",
/*returnType=*/ "::llvm::SmallVector<::mlir::Value>",
/*methodName=*/ "getAccessedOperands",
- /*args=*/ (ins)
+ /*args=*/ (ins),
+ /*methodBody=*/ [{}],
+ /*defaultImpl=*/ [{
+ return {};
+ }]
>
];
}
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
index b6aa73dad22970..fe5c10a6f13e57 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOpBase.td
@@ -250,7 +250,7 @@ class LLVM_Op<string mnemonic, list<Trait> traits = []> :
class LLVM_MemAccessOpBase<string mnemonic, list<Trait> traits = []> :
LLVM_Op<mnemonic, !listconcat([
DeclareOpInterfaceMethods<AccessGroupOpInterface>,
- DeclareOpInterfaceMethods<AliasAnalysisOpInterface>], traits)>,
+ DeclareOpInterfaceMethods<AliasAnalysisOpInterface, ["getAccessedOperands"]>], traits)>,
LLVM_MemOpPatterns {
dag aliasAttrs = (ins OptionalAttr<LLVM_AccessGroupArrayAttr>:$access_groups,
OptionalAttr<LLVM_AliasScopeArrayAttr>:$alias_scopes,
@@ -294,7 +294,7 @@ class LLVM_IntrOpBase<Dialect dialect, string opName, string enumName,
!if(!gt(requiresAccessGroup, 0),
[DeclareOpInterfaceMethods<AccessGroupOpInterface>], []),
!if(!gt(requiresAliasAnalysis, 0),
- [DeclareOpInterfaceMethods<AliasAnalysisOpInterface>], []),
+ [DeclareOpInterfaceMethods<AliasAnalysisOpInterface, ["getAccessedOperands"]>], []),
!if(!gt(requiresFastmath, 0),
[DeclareOpInterfaceMethods<FastmathFlagsInterface>], []),
traits)>,
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
index 9e65898154bd65..a91ea594d99fce 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMOps.td
@@ -322,149 +322,6 @@ def LLVM_GEPOp : LLVM_Op<"getelementptr", [Pure,
let hasVerifier = 1;
}
-def LLVM_LoadOp : LLVM_MemAccessOpBase<"load",
- [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
- DeclareOpInterfaceMethods<PromotableMemOpInterface>,
- DeclareOpInterfaceMethods<SafeMemorySlotAccessOpInterface>]> {
- dag args = (ins LLVM_AnyPointer:$addr,
- OptionalAttr<I64Attr>:$alignment,
- UnitAttr:$volatile_,
- UnitAttr:$nontemporal,
- DefaultValuedAttr<
- AtomicOrdering, "AtomicOrdering::not_atomic">:$ordering,
- OptionalAttr<StrAttr>:$syncscope);
- // Append the aliasing related attributes defined in LLVM_MemAccessOpBase.
- let arguments = !con(args, aliasAttrs);
- let results = (outs LLVM_LoadableType:$res);
- string llvmInstName = "Load";
- let description = [{
- The `load` operation is used to read from memory. A load may be marked as
- atomic, volatile, and/or nontemporal, and takes a number of optional
- attributes that specify aliasing information.
-
- An atomic load only supports a limited set of pointer, integer, and
- floating point types, and requires an explicit alignment.
-
- Examples:
- ```mlir
- // A volatile load of a float variable.
- %0 = llvm.load volatile %ptr : !llvm.ptr -> f32
-
- // A nontemporal load of a float variable.
- %0 = llvm.load %ptr {nontemporal} : !llvm.ptr -> f32
-
- // An atomic load of an integer variable.
- %0 = llvm.load %ptr atomic monotonic {alignment = 8 : i64}
- : !llvm.ptr -> i64
- ```
-
- See the following link for more details:
- https://llvm.org/docs/LangRef.html#load-instruction
- }];
- let assemblyFormat = [{
- (`volatile` $volatile_^)? $addr
- (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
- attr-dict `:` qualified(type($addr)) `->` type($res)
- }];
- string llvmBuilder = [{
- auto *inst = builder.CreateLoad($_resultType, $addr, $volatile_);
- $res = inst;
- }] # setOrderingCode
- # setSyncScopeCode
- # setAlignmentCode
- # setNonTemporalMetadataCode
- # setAccessGroupsMetadataCode
- # setAliasAnalysisMetadataCode;
- string mlirBuilder = [{
- auto *loadInst = cast<llvm::LoadInst>(inst);
- unsigned alignment = loadInst->getAlign().value();
- $res = $_builder.create<LLVM::LoadOp>($_location, $_resultType, $addr,
- alignment, loadInst->isVolatile(),
- loadInst->hasMetadata(llvm::LLVMContext::MD_nontemporal),
- convertAtomicOrderingFromLLVM(loadInst->getOrdering()),
- getLLVMSyncScope(loadInst));
- }];
- let builders = [
- OpBuilder<(ins "Type":$type, "Value":$addr,
- CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
- CArg<"bool", "false">:$isNonTemporal,
- CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
- CArg<"StringRef", "StringRef()">:$syncscope)>
- ];
- let hasVerifier = 1;
-}
-
-def LLVM_StoreOp : LLVM_MemAccessOpBase<"store",
- [DeclareOpInterfaceMethods<MemoryEffectsOpInterface>,
- DeclareOpInterfaceMethods<PromotableMemOpInterface>,
- DeclareOpInterfaceMethods<SafeMemorySlotAccessOpInterface>]> {
- dag args = (ins LLVM_LoadableType:$value,
- LLVM_AnyPointer:$addr,
- OptionalAttr<I64Attr>:$alignment,
- UnitAttr:$volatile_,
- UnitAttr:$nontemporal,
- DefaultValuedAttr<
- AtomicOrdering, "AtomicOrdering::not_atomic">:$ordering,
- OptionalAttr<StrAttr>:$syncscope);
- // Append the aliasing related attributes defined in LLVM_MemAccessOpBase.
- let arguments = !con(args, aliasAttrs);
- string llvmInstName = "Store";
- let description = [{
- The `store` operation is used to write to memory. A store may be marked as
- atomic, volatile, and/or nontemporal, and takes a number of optional
- attributes that specify aliasing information.
-
- An atomic store only supports a limited set of pointer, integer, and
- floating point types, and requires an explicit alignment.
-
- Examples:
- ```mlir
- // A volatile store of a float variable.
- llvm.store volatile %val, %ptr : f32, !llvm.ptr
-
- // A nontemporal store of a float variable.
- llvm.store %val, %ptr {nontemporal} : f32, !llvm.ptr
-
- // An atomic store of an integer variable.
- llvm.store %val, %ptr atomic monotonic {alignment = 8 : i64}
- : i64, !llvm.ptr
- ```
-
- See the following link for more details:
- https://llvm.org/docs/LangRef.html#store-instruction
- }];
- let assemblyFormat = [{
- (`volatile` $volatile_^)? $value `,` $addr
- (`atomic` (`syncscope` `(` $syncscope^ `)`)? $ordering^)?
- attr-dict `:` type($value) `,` qualified(type($addr))
- }];
- string llvmBuilder = [{
- auto *inst = builder.CreateStore($value, $addr, $volatile_);
- }] # setOrderingCode
- # setSyncScopeCode
- # setAlignmentCode
- # setNonTemporalMetadataCode
- # setAccessGroupsMetadataCode
- # setAliasAnalysisMetadataCode;
- string mlirBuilder = [{
- auto *storeInst = cast<llvm::StoreInst>(inst);
- unsigned alignment = storeInst->getAlign().value();
- $_op = $_builder.create<LLVM::StoreOp>($_location, $value, $addr,
- alignment, storeInst->isVolatile(),
- storeInst->hasMetadata(llvm::LLVMContext::MD_nontemporal),
- convertAtomicOrderingFromLLVM(storeInst->getOrdering()),
- getLLVMSyncScope(storeInst));
- }];
- let builders = [
- OpBuilder<(ins "Value":$value, "Value":$addr,
- CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile,
- CArg<"bool", "false">:$isNonTemporal,
- CArg<"AtomicOrdering", "AtomicOrdering::not_atomic">:$ordering,
- CArg<"StringRef", "StringRef()">:$syncscope)>
- ];
- let hasVerifier = 1;
-}
-
// Casts.
class LLVM_CastOp<string mnemonic, string instName, Type type,
Type resultType, list<Trait> traits = []> :
@@ -485,18 +342,6 @@ def LLVM_BitcastOp : LLVM_CastOp<"bitcast", "BitCast", LLVM_AnyNonAggregate,
let hasFolder = 1;
let hasVerifier = 1;
}
-def LLVM_AddrSpaceCastOp : LLVM_CastOp<"addrspacecast", "AddrSpaceCast",
- LLVM_ScalarOrVectorOf<LLVM_AnyPointer>,
- LLVM_ScalarOrVectorOf<LLVM_AnyPointer>,
- [DeclareOpInterfaceMethods<PromotableOpInterface>]> {
- let hasFolder = 1;
-}
-def LLVM_IntToPtrOp : LLVM_CastOp<"inttoptr", "IntToPtr",
- LLVM_ScalarOrVectorOf<AnyInteger>,
- LLVM_ScalarOrVectorOf<LLVM_AnyPointer>>;
-def LLVM_PtrToIntOp : LLVM_CastOp<"ptrtoint", "PtrToInt",
- LLVM_ScalarOrVectorOf<LLVM_AnyPointer>,
- LLVM_ScalarOrVectorOf<AnyInteger>>;
def LLVM_SExtOp : LLVM_CastOp<"sext", "SExt",
LLVM_ScalarOrVectorOf<AnyInteger>,
LLVM_ScalarOrVectorOf<AnyInteger>> {
@@ -1629,56 +1474,6 @@ def LLVM_ConstantOp
// Atomic operations.
//
-
-def LLVM_AtomicRMWType : AnyTypeOf<[LLVM_AnyFloat, LLVM_AnyPointer, AnyInteger]>;
-
-def LLVM_AtomicRMWOp : LLVM_MemAccessOpBase<"atomicrmw", [
- TypesMatchWith<"result #0 and operand #1 have the same type",
- "val", "res", "$_self">]> {
- dag args = (ins AtomicBinOp:$bin_op,
- LLVM_AnyPointer:$ptr,
- LLVM_AtomicRMWType:$val, AtomicOrdering:$ordering,
- OptionalAttr<StrAttr>:$syncscope,
- OptionalAttr<I64Attr>:$alignment,
- UnitAttr:$volatile_);
- // Append the aliasing related attributes defined in LLVM_MemAccessOpBase.
- let arguments = !con(args, aliasAttrs);
- let results = (outs LLVM_AtomicRMWType:$res);
- let assemblyFormat = [{
- (`volatile` $volatile_^)? $bin_op $ptr `,` $val
- (`syncscope` `(` $syncscope^ `)`)? $ordering attr-dict `:`
- qualified(type($ptr)) `,` type($val)
- }];
- string llvmInstName = "AtomicRMW";
- string llvmBuilder = [{
- auto *inst = builder.CreateAtomicRMW(
- convertAtomicBinOpToLLVM($bin_op), $ptr, $val, llvm::MaybeAlign(),
- convertAtomicOrderingToLLVM($ordering));
- $res = inst;
- }] # setVolatileCode
- # setSyncScopeCode
- # setAlignmentCode
- # setAccessGroupsMetadataCode
- # setAliasAnalysisMetadataCode;
- string mlirBuilder = [{
- auto *atomicInst = cast<llvm::AtomicRMWInst>(inst);
- unsigned alignment = atomicInst->getAlign().value();
- $res = $_builder.create<LLVM::AtomicRMWOp>($_location,
- convertAtomicBinOpFromLLVM(atomicInst->getOperation()), $ptr, $val,
- convertAtomicOrderingFromLLVM(atomicInst->getOrdering()),
- getLLVMSyncScope(atomicInst), alignment, atomicInst->isVolatile());
- }];
- list<int> llvmArgIndices = [-1, 0, 1, -1, -1, -1, -1, -1, -1, -1, -1];
- let builders = [
- OpBuilder<(ins "LLVM::AtomicBinOp":$binOp, "Value":$ptr, "Value":$val,
- "LLVM::AtomicOrdering":$ordering,
- CArg<"StringRef", "StringRef()">:$syncscope,
- CArg<"unsigned", "0">:$alignment, CArg<"bool", "false">:$isVolatile
- )>
- ];
- let hasVerifier = 1;
-}
-
def LLVM_AtomicCmpXchgType : AnyTypeOf<[AnyInteger, LLVM_AnyPointer]>;
def LLVM_AtomicCmpXchgOp : LLVM_MemAccessOpBase<"cmpxchg", [
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
index 93733ccd4929ae..2fdda5382e5aa9 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.h
@@ -14,6 +14,7 @@
#ifndef MLIR_DIALECT_LLVMIR_LLVMTYPES_H_
#define MLIR_DIALECT_LLVMIR_LLVMTYPES_H_
+#include "mlir/Dialect/Ptr/IR/PtrTypes.h"
#include "mlir/IR/Types.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"
#include "mlir/Interfaces/MemorySlotInterfaces.h"
@@ -283,18 +284,36 @@ Type getScalableVectorType(Type elementType, unsigned numElements);
/// (aggregates such as struct) or types that don't have a size (such as void).
llvm::TypeSize getPrimitiveTypeSizeInBits(Type type);
-/// The positions of different values in the data layout entry for pointers.
-enum class PtrDLEntryPos { Size = 0, Abi = 1, Preferred = 2, Index = 3 };
-
-/// Returns the value that corresponds to named position `pos` from the
-/// data layout entry `attr` assuming it's a dense integer elements attribute.
-/// Returns `std::nullopt` if `pos` is not present in the entry.
-/// Currently only `PtrDLEntryPos::Index` is optional, and all other positions
-/// may be assumed to be present.
-std::optional<uint64_t> extractPointerSpecValue(Attribute attr,
- PtrDLEntryPos pos);
+/// Returns true if `type` is a `ptr.ptr` type whose memory space is an LLVM
+/// address space attribute.
+bool isLLVMPointerType(Type type);
+/// Utility class for creating pointer types of the form
+/// `ptr.ptr<#llvm.address_space<#int_attr>>`
+class LLVMPointerType : public ptr::PtrType {
+public:
+ LLVMPointerType() : ptr::PtrType() {}
+ LLVMPointerType(const ptr::PtrType &ty) : ptr::PtrType(ty) {}
+ template <typename T>
+ static bool classof(T val) {
+ static_assert(std::is_convertible<Type, T>::value,
+ "casting from a non-convertible type");
+ return isLLVMPointerType(val);
+ }
+ static PtrType get(::mlir::MLIRContext *context, unsigned addressSpace = 0);
+ static ::mlir::Type parse(::mlir::AsmParser &odsParser);
+ void print(::mlir::AsmPrinter &odsPrinter) const;
+};
} // namespace LLVM
} // namespace mlir
+namespace mlir {
+namespace detail {
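+// `LLVMPointerType` is a thin wrapper over `ptr::PtrType`, so it shares the
+// same `TypeID`; casts are refined by `classof` above instead.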
+template <>
+class TypeIDResolver<LLVM::LLVMPointerType> {
+public:
+ static TypeID resolveTypeID() { return TypeID::get<ptr::PtrType>(); }
+};
+} // namespace detail
+} // namespace mlir
+
#endif // MLIR_DIALECT_LLVMIR_LLVMTYPES_H_
diff --git a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
index 96cdbf01b4bd91..75086e1c4214d3 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/LLVMTypes.td
@@ -117,39 +117,6 @@ def LLVMFunctionType : LLVMType<"LLVMFunction", "func"> {
}];
}
-//===----------------------------------------------------------------------===//
-// LLVMPointerType
-//===----------------------------------------------------------------------===//
-
-def LLVMPointerType : LLVMType<"LLVMPointer", "ptr", [
- DeclareTypeInterfaceMethods<DataLayoutTypeInterface, [
- "areCompatible", "verifyEntries"]>]> {
- let summary = "LLVM pointer type";
- let description = [{
- The `!llvm.ptr` type is an LLVM pointer type. This type typically represents
- a reference to an object in memory. Pointers are optionally parameterized
- by the address space.
-
- Example:
-
- ```mlir
- !llvm.ptr
- ```
- }];
-
- let parameters = (ins DefaultValuedParameter<"unsigned", "0">:$addressSpace);
- let assemblyFormat = [{
- (`<` $addressSpace^ `>`)?
- }];
-
- let skipDefaultBuilders = 1;
- let builders = [
- TypeBuilder<(ins CArg<"unsigned", "0">:$addressSpace), [{
- return $_get($_ctxt, addressSpace);
- }]>
- ];
-}
-
//===----------------------------------------------------------------------===//
// LLVMFixedVectorType
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
index 0ab53ce7e3327e..013d67853450d6 100644
--- a/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
+++ b/mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp
@@ -1072,6 +1072,7 @@ void ConvertAsyncToLLVMPass::runOnOperation() {
ConversionTarget target(*ctx);
target.addLegalOp<arith::ConstantOp, func::ConstantOp,
UnrealizedConversionCastOp>();
+ target.addLegalDialect<ptr::PtrDialect>();
target.addLegalDialect<LLVM::LLVMDialect>();
// All operations from Async dialect must be lowered to the runtime API and
diff --git a/mlir/lib/Conversion/ConvertToLLVM/ConvertToLLVMPass.cpp b/mlir/lib/Conversion/ConvertToLLVM/ConvertToLLVMPass.cpp
index 6135117348a5b8..ab71c81989fd3c 100644
--- a/mlir/lib/Conversion/ConvertToLLVM/ConvertToLLVMPass.cpp
+++ b/mlir/lib/Conversion/ConvertToLLVM/ConvertToLLVMPass.cpp
@@ -75,6 +75,7 @@ class ConvertToLLVMPass
LogicalResult initialize(MLIRContext *context) final {
RewritePatternSet tempPatterns(context);
auto target = std::make_shared<ConversionTarget>(*context);
+ target->addLegalDialect<ptr::PtrDialect>();
target->addLegalDialect<LLVM::LLVMDialect>();
auto typeConverter = std::make_shared<LLVMTypeConverter>(context);
diff --git a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
index 94df3765a67e74..6f0e8cc6930054 100644
--- a/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
+++ b/mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp
@@ -592,6 +592,7 @@ void GpuToLLVMConversionPass::runOnOperation() {
options.useBarePtrCallConv = hostBarePtrCallConv;
RewritePatternSet patterns(context);
ConversionTarget target(*context);
+ target.addLegalDialect<ptr::PtrDialect>();
target.addLegalDialect<LLVM::LLVMDialect>();
LLVMTypeConverter converter(context, options);
diff --git a/mlir/lib/Conversion/LLVMCommon/ConversionTarget.cpp b/mlir/lib/Conversion/LLVMCommon/ConversionTarget.cpp
index 56b4bd7d30a104..2fde4c1cd9e2c0 100644
--- a/mlir/lib/Conversion/LLVMCommon/ConversionTarget.cpp
+++ b/mlir/lib/Conversion/LLVMCommon/ConversionTarget.cpp
@@ -13,6 +13,7 @@ using namespace mlir;
mlir::LLVMConversionTarget::LLVMConversionTarget(MLIRContext &ctx)
: ConversionTarget(ctx) {
+ this->addLegalDialect<ptr::PtrDialect>();
this->addLegalDialect<LLVM::LLVMDialect>();
this->addLegalOp<UnrealizedConversionCastOp>();
}
diff --git a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
index 2bfca303b5fd48..3c048a91d24711 100644
--- a/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
+++ b/mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp
@@ -1576,7 +1576,7 @@ struct AtomicRMWOpLowering : public LoadStoreOpLowering<memref::AtomicRMWOp> {
adaptor.getIndices(), rewriter);
rewriter.replaceOpWithNewOp<LLVM::AtomicRMWOp>(
atomicOp, *maybeKind, dataPtr, adaptor.getValue(),
- LLVM::AtomicOrdering::acq_rel);
+ ptr::AtomicOrdering::acq_rel);
return success();
}
};
diff --git a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
index 9cd3a5ce65ce5f..4fea99d95d9b94 100644
--- a/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
+++ b/mlir/lib/Conversion/NVGPUToNVVM/NVGPUToNVVM.cpp
@@ -469,6 +469,7 @@ struct ConvertNVGPUToNVVMPass
});
populateNVGPUToNVVMConversionPatterns(converter, patterns);
LLVMConversionTarget target(getContext());
+ target.addLegalDialect<ptr::PtrDialect>();
target.addLegalDialect<::mlir::LLVM::LLVMDialect>();
target.addLegalDialect<::mlir::arith::ArithDialect>();
target.addLegalDialect<::mlir::memref::MemRefDialect>();
diff --git a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
index 2f8b3f7e11de15..09e683dd9f0756 100644
--- a/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
+++ b/mlir/lib/Conversion/SCFToOpenMP/SCFToOpenMP.cpp
@@ -232,7 +232,7 @@ static omp::ReductionDeclareOp addAtomicRMW(OpBuilder &builder,
atomicBlock->getArgument(1));
builder.create<LLVM::AtomicRMWOp>(reduce.getLoc(), atomicKind,
atomicBlock->getArgument(0), loaded,
- LLVM::AtomicOrdering::monotonic);
+ ptr::AtomicOrdering::monotonic);
builder.create<omp::YieldOp>(reduce.getLoc(), ArrayRef<Value>());
return decl;
}
@@ -475,7 +475,7 @@ struct ParallelOpLowering : public OpRewritePattern<scf::ParallelOp> {
static LogicalResult applyPatterns(ModuleOp module, unsigned numThreads) {
ConversionTarget target(*module.getContext());
target.addIllegalOp<scf::ReduceOp, scf::ReduceReturnOp, scf::ParallelOp>();
- target.addLegalDialect<omp::OpenMPDialect, LLVM::LLVMDialect,
+ target.addLegalDialect<omp::OpenMPDialect, ptr::PtrDialect, LLVM::LLVMDialect,
memref::MemRefDialect>();
RewritePatternSet patterns(module.getContext());
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
index 0e9eb9799c3e0b..5a472c78b66d4e 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/ConvertLaunchFuncToLLVMCalls.cpp
@@ -312,6 +312,7 @@ class LowerHostCodeToLLVM
populateSPIRVToLLVMTypeConversion(typeConverter);
ConversionTarget target(*context);
+ target.addLegalDialect<ptr::PtrDialect>();
target.addLegalDialect<LLVM::LLVMDialect>();
if (failed(applyPartialConversion(module, target, std::move(patterns))))
signalPassFailure();
diff --git a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp
index 38091e449c56ee..1a3daafeb08c6b 100644
--- a/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp
+++ b/mlir/lib/Conversion/SPIRVToLLVM/SPIRVToLLVMPass.cpp
@@ -58,6 +58,7 @@ void ConvertSPIRVToLLVMPass::runOnOperation() {
ConversionTarget target(*context);
target.addIllegalDialect<spirv::SPIRVDialect>();
+ target.addLegalDialect<ptr::PtrDialect>();
target.addLegalDialect<LLVM::LLVMDialect>();
if (clientAPI != spirv::ClientAPI::OpenCL &&
diff --git a/mlir/lib/Dialect/LLVMIR/CMakeLists.txt b/mlir/lib/Dialect/LLVMIR/CMakeLists.txt
index b00259677697a5..5fe90a5eaaf714 100644
--- a/mlir/lib/Dialect/LLVMIR/CMakeLists.txt
+++ b/mlir/lib/Dialect/LLVMIR/CMakeLists.txt
@@ -35,6 +35,7 @@ add_mlir_dialect_library(MLIRLLVMDialect
MLIRFunctionInterfaces
MLIRInferTypeOpInterface
MLIRIR
+ MLIRPtrDialect
MLIRMemorySlotInterfaces
MLIRSideEffectInterfaces
MLIRSupport
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp
index 645a45dd96befb..4f9a9f53ab4fac 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMAttrs.cpp
@@ -12,6 +12,7 @@
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
+#include "mlir/Dialect/Ptr/IR/PtrAttrs.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/DialectImplementation.h"
#include "mlir/Interfaces/FunctionInterfaces.h"
@@ -248,3 +249,211 @@ TargetFeaturesAttr TargetFeaturesAttr::featuresAt(Operation *op) {
return parentFunction.getOperation()->getAttrOfType<TargetFeaturesAttr>(
getAttributeName());
}
+
+//===----------------------------------------------------------------------===//
+// AddressSpaceAttr
+//===----------------------------------------------------------------------===//
+
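+/// Mirrors the TableGen `LLVM_LoadableType` constraint: any LLVM-compatible
+/// primitive type, excluding opaque structs and target extension types that
+/// do not support memory operations.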
+static bool isLoadableType(Type type) {
+ return /*LLVM_PrimitiveType*/ (
+ LLVM::isCompatibleOuterType(type) &&
+ !isa<LLVM::LLVMVoidType, LLVM::LLVMFunctionType>(type)) &&
+ /*LLVM_OpaqueStruct*/
+ !(isa<LLVM::LLVMStructType>(type) &&
+ cast<LLVM::LLVMStructType>(type).isOpaque()) &&
+ /*LLVM_AnyTargetExt*/
+ !(isa<LLVM::LLVMTargetExtType>(type) &&
+ !cast<LLVM::LLVMTargetExtType>(type).supportsMemOps());
+}
+
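+/// The LLVM dialect owns the memory model implemented by this attribute.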
+Dialect *AddressSpaceAttr::getModelOwner() const { return &getDialect(); }
+
+unsigned AddressSpaceAttr::getAddressSpace() const { return getAs(); }
+
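+/// Address space 0 is LLVM's default (generic) address space.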
+Attribute AddressSpaceAttr::getDefaultMemorySpace() const {
+ return AddressSpaceAttr::get(getContext(), 0);
+}
+
+bool AddressSpaceAttr::isLoadableType(Type type) const {
+ return ::isLoadableType(type);
+}
+
+bool AddressSpaceAttr::isStorableType(Type type) const {
+ return ::isLoadableType(type);
+}
+
+/// Verifies that `attr` is either absent or an `ArrayAttr` whose elements are
+/// all instances of `AttrT`.
+template <class AttrT>
+static LogicalResult isArrayOf(Operation *op, Attribute attr) {
+ if (!attr)
+ return success();
+ auto array = dyn_cast<ArrayAttr>(attr);
+ if (!array)
+ return failure();
+ for (Attribute iter : array)
+ if (!isa<AttrT>(iter))
+ return failure();
+ return success();
+}
+
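+/// Verifies the alias analysis metadata attached to `op`: `alias_scopes` and
+/// `noalias_scopes` must be arrays of alias scope attributes, and `tbaa` must
+/// be an array of TBAA tag attributes.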
+static LogicalResult verifyAliasAnalysisOpInterface(Operation *op) {
+ if (auto aliasScopes = op->getAttr("alias_scopes"))
+ if (failed(isArrayOf<AliasScopeAttr>(op, aliasScopes)))
+ return op->emitError() << "attribute '"
+ << "alias_scopes"
+ << "' failed to satisfy constraint: LLVM dialect "
+ "alias scope array";
+
+ if (auto noAliasScopes = op->getAttr("noalias_scopes"))
+ if (failed(isArrayOf<AliasScopeAttr>(op, noAliasScopes)))
+ return op->emitError() << "attribute '"
+ << "noalias_scopes"
+ << "' failed to satisfy constraint: LLVM dialect "
+ "alias scope array";
+
+ Attribute tags = op->getAttr("tbaa");
+ if (!tags)
+ return success();
+
+ if (failed(isArrayOf<TBAATagAttr>(op, tags)))
+ return op->emitError() << "attribute '"
+ << "tbaa"
+ << "' failed to satisfy constraint: LLVM dialect "
+ "TBAA tag metadata array";
+ return success();
+}
+
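+/// Verifies that the optional `access_groups` attribute of `op`, if present,
+/// is an array of access group attributes.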
+static LogicalResult verifyAccessGroupOpInterface(Operation *op) {
+ if (auto accessGroups = op->getAttr("access_groups"))
+ if (failed(isArrayOf<AccessGroupAttr>(op, accessGroups)))
+ return op->emitError() << "attribute '"
+ << "access_groups"
+ << "' failed to satisfy constraint: LLVM dialect "
+ "access group metadata array";
+ return success();
+}
+
+/// Returns true if the given type is supported by atomic operations. All
+/// integer and float types with limited bit width are supported. Additionally,
+/// depending on the operation, pointers may be supported as well.
+static bool isTypeCompatibleWithAtomicOp(Type type, bool isPointerTypeAllowed) {
+ if (llvm::isa<LLVMPointerType>(type))
+ return isPointerTypeAllowed;
+
+ std::optional<unsigned> bitWidth;
+ if (auto floatType = llvm::dyn_cast<FloatType>(type)) {
+ if (!isCompatibleFloatingPointType(type))
+ return false;
+ bitWidth = floatType.getWidth();
+ }
+ if (auto integerType = llvm::dyn_cast<IntegerType>(type))
+ bitWidth = integerType.getWidth();
+  // The type is not an integer, float, or pointer type.
+ if (!bitWidth)
+ return false;
+ return *bitWidth == 8 || *bitWidth == 16 || *bitWidth == 32 ||
+ *bitWidth == 64;
+}
+
+/// Verifies the attributes and the type of atomic memory access operations.
+LogicalResult AddressSpaceAttr::verifyCompatibleAtomicOp(
+ ptr::AtomicOpInfo info,
+ ArrayRef<ptr::AtomicOrdering> unsupportedOrderings) const {
+ if (failed(verifyAliasAnalysisOpInterface(info.op)))
+ return failure();
+ if (failed(verifyAccessGroupOpInterface(info.op)))
+ return failure();
+ if (info.ordering != ptr::AtomicOrdering::not_atomic) {
+ if (!isTypeCompatibleWithAtomicOp(info.valueType,
+ /*isPointerTypeAllowed=*/true))
+ return info.op->emitOpError("unsupported type ")
+ << info.valueType << " for atomic access";
+ if (llvm::is_contained(unsupportedOrderings, info.ordering))
+ return info.op->emitOpError("unsupported ordering '")
+ << ptr::stringifyAtomicOrdering(info.ordering) << "'";
+ if (!info.alignment)
+ return info.op->emitOpError("expected alignment for atomic access");
+ return success();
+ }
+ if (info.syncScope)
+ return info.op->emitOpError(
+ "expected syncscope to be null for non-atomic access");
+ return success();
+}
+
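+/// Verifies an atomic read-modify-write access: floating-point `bin_op`s
+/// require an LLVM floating point type, `xchg` additionally accepts pointers,
+/// and all remaining `bin_op`s require an 8/16/32/64-bit integer type.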
+LogicalResult AddressSpaceAttr::verifyAtomicRMW(ptr::AtomicOpInfo info,
+ ptr::AtomicBinOp binOp) const {
+ if (failed(verifyAccessGroupOpInterface(info.op)))
+ return failure();
+ if (failed(verifyAliasAnalysisOpInterface(info.op)))
+ return failure();
+ auto valType = info.valueType;
+ if (binOp == ptr::AtomicBinOp::fadd || binOp == ptr::AtomicBinOp::fsub ||
+ binOp == ptr::AtomicBinOp::fmin || binOp == ptr::AtomicBinOp::fmax) {
+ if (!mlir::LLVM::isCompatibleFloatingPointType(valType))
+ return info.op->emitOpError("expected LLVM IR floating point type");
+ } else if (binOp == ptr::AtomicBinOp::xchg) {
+ if (!isTypeCompatibleWithAtomicOp(valType, /*isPointerTypeAllowed=*/true))
+ return info.op->emitOpError("unexpected LLVM IR type for 'xchg' bin_op");
+ } else {
+ auto intType = llvm::dyn_cast<IntegerType>(valType);
+ unsigned intBitWidth = intType ? intType.getWidth() : 0;
+ if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
+ intBitWidth != 64)
+ return info.op->emitOpError("expected LLVM IR integer type");
+ }
+
+ if (static_cast<unsigned>(info.ordering) <
+ static_cast<unsigned>(ptr::AtomicOrdering::monotonic))
+ return info.op->emitOpError()
+ << "expected at least '"
+ << ptr::stringifyAtomicOrdering(ptr::AtomicOrdering::monotonic)
+ << "' ordering";
+
+ return success();
+}
+
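+/// Verifies a compare-and-exchange access: both orderings must be at least
+/// 'monotonic', and the failure ordering cannot be 'release' or 'acq_rel'.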
+LogicalResult AddressSpaceAttr::verifyAtomicAtomicCmpXchg(
+ ptr::AtomicOpInfo info, ptr::AtomicOrdering failureOrdering) const {
+ if (failed(verifyAccessGroupOpInterface(info.op)))
+ return failure();
+ if (failed(verifyAliasAnalysisOpInterface(info.op)))
+ return failure();
+ auto valType = info.valueType;
+ if (!isTypeCompatibleWithAtomicOp(valType,
+ /*isPointerTypeAllowed=*/true))
+ return info.op->emitOpError("unexpected LLVM IR type");
+ if (info.ordering < ptr::AtomicOrdering::monotonic ||
+ failureOrdering < ptr::AtomicOrdering::monotonic)
+ return info.op->emitOpError("ordering must be at least 'monotonic'");
+ if (failureOrdering == ptr::AtomicOrdering::release ||
+ failureOrdering == ptr::AtomicOrdering::acq_rel)
+ return info.op->emitOpError(
+ "failure ordering cannot be 'release' or 'acq_rel'");
+ return success();
+}
+
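+/// Returns true if `ty` is an instance of `Ty`, or an LLVM-compatible vector
+/// whose element type is an instance of `Ty`.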
+template <typename Ty>
+static bool isScalarOrVectorOf(Type ty) {
+ return isa<Ty>(ty) || (LLVM::isCompatibleVectorType(ty) &&
+ isa<Ty>(LLVM::getVectorElementType(ty)));
+}
+
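+/// Verifies the types of a pointer cast such as `ptr.addrspacecast`: both the
+/// target and source types must be pointers or vectors of pointers.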
+LogicalResult AddressSpaceAttr::verifyPtrCast(Operation *op, Type tgt,
+ Type src) const {
+ if (!isScalarOrVectorOf<LLVMPointerType>(tgt))
+ return op->emitOpError() << "invalid target ptr-like type";
+ if (!isScalarOrVectorOf<LLVMPointerType>(src))
+ return op->emitOpError() << "invalid source ptr-like type";
+ return success();
+}
+
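+/// Verifies the types of an int-to-pointer or pointer-to-int cast: one type
+/// must be integer-like and the other pointer-like.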
+LogicalResult AddressSpaceAttr::verifyIntCastTypes(Operation *op,
+ Type intLikeTy,
+ Type ptrLikeTy) const {
+ if (!isScalarOrVectorOf<IntegerType>(intLikeTy))
+ return op->emitOpError() << "invalid int-like type";
+ if (!isScalarOrVectorOf<LLVMPointerType>(ptrLikeTy))
+ return op->emitOpError() << "invalid ptr-like type";
+ return success();
+}
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
index 64388a9a01812a..4ab8c726646f8f 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp
@@ -17,6 +17,7 @@
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/Dialect/LLVMIR/LLVMInterfaces.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
+#include "mlir/Dialect/Ptr/IR/PtrDialect.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/IR/BuiltinTypes.h"
@@ -754,22 +755,6 @@ Type GEPOp::getResultPtrElementType() {
// LoadOp
//===----------------------------------------------------------------------===//
-void LoadOp::getEffects(
- SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
- &effects) {
- effects.emplace_back(MemoryEffects::Read::get(), getAddr());
- // Volatile operations can have target-specific read-write effects on
- // memory besides the one referred to by the pointer operand.
- // Similarly, atomic operations that are monotonic or stricter cause
- // synchronization that from a language point-of-view, are arbitrary
- // read-writes into memory.
- if (getVolatile_() || (getOrdering() != AtomicOrdering::not_atomic &&
- getOrdering() != AtomicOrdering::unordered)) {
- effects.emplace_back(MemoryEffects::Write::get());
- effects.emplace_back(MemoryEffects::Read::get());
- }
-}
-
/// Returns true if the given type is supported by atomic operations. All
/// integer and float types with limited bit width are supported. Additionally,
/// depending on the operation pointers may be supported as well.
@@ -814,63 +799,6 @@ LogicalResult verifyAtomicMemOp(OpTy memOp, Type valueType,
return success();
}
-LogicalResult LoadOp::verify() {
- Type valueType = getResult().getType();
- return verifyAtomicMemOp(*this, valueType,
- {AtomicOrdering::release, AtomicOrdering::acq_rel});
-}
-
-void LoadOp::build(OpBuilder &builder, OperationState &state, Type type,
- Value addr, unsigned alignment, bool isVolatile,
- bool isNonTemporal, AtomicOrdering ordering,
- StringRef syncscope) {
- build(builder, state, type, addr,
- alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
- isNonTemporal, ordering,
- syncscope.empty() ? nullptr : builder.getStringAttr(syncscope),
- /*access_groups=*/nullptr,
- /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr,
- /*tbaa=*/nullptr);
-}
-
-//===----------------------------------------------------------------------===//
-// StoreOp
-//===----------------------------------------------------------------------===//
-
-void StoreOp::getEffects(
- SmallVectorImpl<SideEffects::EffectInstance<MemoryEffects::Effect>>
- &effects) {
- effects.emplace_back(MemoryEffects::Write::get(), getAddr());
- // Volatile operations can have target-specific read-write effects on
- // memory besides the one referred to by the pointer operand.
- // Similarly, atomic operations that are monotonic or stricter cause
- // synchronization that from a language point-of-view, are arbitrary
- // read-writes into memory.
- if (getVolatile_() || (getOrdering() != AtomicOrdering::not_atomic &&
- getOrdering() != AtomicOrdering::unordered)) {
- effects.emplace_back(MemoryEffects::Write::get());
- effects.emplace_back(MemoryEffects::Read::get());
- }
-}
-
-LogicalResult StoreOp::verify() {
- Type valueType = getValue().getType();
- return verifyAtomicMemOp(*this, valueType,
- {AtomicOrdering::acquire, AtomicOrdering::acq_rel});
-}
-
-void StoreOp::build(OpBuilder &builder, OperationState &state, Value value,
- Value addr, unsigned alignment, bool isVolatile,
- bool isNonTemporal, AtomicOrdering ordering,
- StringRef syncscope) {
- build(builder, state, value, addr,
- alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
- isNonTemporal, ordering,
- syncscope.empty() ? nullptr : builder.getStringAttr(syncscope),
- /*access_groups=*/nullptr,
- /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr);
-}
-
//===----------------------------------------------------------------------===//
// CallOp
//===----------------------------------------------------------------------===//
@@ -2602,47 +2530,6 @@ ConstantOp LLVM::ConstantOp::materialize(OpBuilder &builder, Attribute value,
// Constant op constant-folds to its value.
OpFoldResult LLVM::ConstantOp::fold(FoldAdaptor) { return getValue(); }
-//===----------------------------------------------------------------------===//
-// AtomicRMWOp
-//===----------------------------------------------------------------------===//
-
-void AtomicRMWOp::build(OpBuilder &builder, OperationState &state,
- AtomicBinOp binOp, Value ptr, Value val,
- AtomicOrdering ordering, StringRef syncscope,
- unsigned alignment, bool isVolatile) {
- build(builder, state, val.getType(), binOp, ptr, val, ordering,
- !syncscope.empty() ? builder.getStringAttr(syncscope) : nullptr,
- alignment ? builder.getI64IntegerAttr(alignment) : nullptr, isVolatile,
- /*access_groups=*/nullptr,
- /*alias_scopes=*/nullptr, /*noalias_scopes=*/nullptr, /*tbaa=*/nullptr);
-}
-
-LogicalResult AtomicRMWOp::verify() {
- auto valType = getVal().getType();
- if (getBinOp() == AtomicBinOp::fadd || getBinOp() == AtomicBinOp::fsub ||
- getBinOp() == AtomicBinOp::fmin || getBinOp() == AtomicBinOp::fmax) {
- if (!mlir::LLVM::isCompatibleFloatingPointType(valType))
- return emitOpError("expected LLVM IR floating point type");
- } else if (getBinOp() == AtomicBinOp::xchg) {
- if (!isTypeCompatibleWithAtomicOp(valType, /*isPointerTypeAllowed=*/true))
- return emitOpError("unexpected LLVM IR type for 'xchg' bin_op");
- } else {
- auto intType = llvm::dyn_cast<IntegerType>(valType);
- unsigned intBitWidth = intType ? intType.getWidth() : 0;
- if (intBitWidth != 8 && intBitWidth != 16 && intBitWidth != 32 &&
- intBitWidth != 64)
- return emitOpError("expected LLVM IR integer type");
- }
-
- if (static_cast<unsigned>(getOrdering()) <
- static_cast<unsigned>(AtomicOrdering::monotonic))
- return emitOpError() << "expected at least '"
- << stringifyAtomicOrdering(AtomicOrdering::monotonic)
- << "' ordering";
-
- return success();
-}
-
//===----------------------------------------------------------------------===//
// AtomicCmpXchgOp
//===----------------------------------------------------------------------===//
@@ -2806,26 +2693,11 @@ LogicalResult LLVM::BitcastOp::verify() {
// 'llvm.addrspacecast' must be used for this purpose instead.
if (resultType.getAddressSpace() != sourceType.getAddressSpace())
return emitOpError("cannot cast pointers of different address spaces, "
- "use 'llvm.addrspacecast' instead");
+ "use 'ptr.addrspacecast' instead");
return success();
}
-//===----------------------------------------------------------------------===//
-// Folder for LLVM::AddrSpaceCastOp
-//===----------------------------------------------------------------------===//
-
-OpFoldResult LLVM::AddrSpaceCastOp::fold(FoldAdaptor adaptor) {
- // addrcast(x : T0, T0) -> x
- if (getArg().getType() == getType())
- return getArg();
- // addrcast(addrcast(x : T0, T1), T0) -> x
- if (auto prev = getArg().getDefiningOp<AddrSpaceCastOp>())
- if (prev.getArg().getType() == getType())
- return prev.getArg();
- return {};
-}
-
//===----------------------------------------------------------------------===//
// Folder for LLVM::GEPOp
//===----------------------------------------------------------------------===//
@@ -2965,6 +2837,54 @@ LogicalResult LinkerOptionsOp::verify() {
//===----------------------------------------------------------------------===//
// LLVMDialect initialization, type parsing, and registration.
//===----------------------------------------------------------------------===//
+namespace {
+// Implementation of the `AccessGroupOpInterface` model.
+class AccessGroupOpInterfaceLoadImpl
+ : public LLVM::AccessGroupOpInterface::ExternalModel<
+ AccessGroupOpInterfaceLoadImpl, LoadOp> {};
+// Implementation of the `AccessGroupOpInterface` model.
+class AccessGroupOpInterfaceStoreImpl
+ : public LLVM::AccessGroupOpInterface::ExternalModel<
+ AccessGroupOpInterfaceStoreImpl, StoreOp> {};
+// Implementation of the `AccessGroupOpInterface` model.
+class AccessGroupOpInterfaceAtomicRMWImpl
+ : public LLVM::AccessGroupOpInterface::ExternalModel<
+ AccessGroupOpInterfaceAtomicRMWImpl, AtomicRMWOp> {};
+
+// Implementation of the `AliasAnalysisOpInterface` model.
+class AliasAnalysisOpInterfaceLoadImpl
+ : public LLVM::AliasAnalysisOpInterface::ExternalModel<
+ AliasAnalysisOpInterfaceLoadImpl, LoadOp> {
+public:
+ SmallVector<Value> getAccessedOperands(Operation *op) const {
+ if (auto loadOp = dyn_cast<LoadOp>(op))
+ return loadOp.getAccessedOperands();
+ return {};
+ }
+};
+// Implementation of the `AliasAnalysisOpInterface` model.
+class AliasAnalysisOpInterfaceStoreImpl
+ : public LLVM::AliasAnalysisOpInterface::ExternalModel<
+ AliasAnalysisOpInterfaceStoreImpl, StoreOp> {
+public:
+ SmallVector<Value> getAccessedOperands(Operation *op) const {
+ if (auto storeOp = dyn_cast<StoreOp>(op))
+ return storeOp.getAccessedOperands();
+ return {};
+ }
+};
+// Implementation of the `AliasAnalysisOpInterface` model.
+class AliasAnalysisOpInterfaceAtomicRMWImpl
+ : public LLVM::AliasAnalysisOpInterface::ExternalModel<
+ AliasAnalysisOpInterfaceAtomicRMWImpl, AtomicRMWOp> {
+public:
+ SmallVector<Value> getAccessedOperands(Operation *op) const {
+ if (auto atomicOp = dyn_cast<AtomicRMWOp>(op))
+ return atomicOp.getAccessedOperands();
+ return {};
+ }
+};
+} // namespace
void LLVMDialect::initialize() {
registerAttributes();
@@ -2994,6 +2914,16 @@ void LLVMDialect::initialize() {
addInterfaces<LLVMOpAsmDialectInterface>();
// clang-format on
detail::addLLVMInlinerInterface(this);
+
+ LoadOp::attachInterface<AccessGroupOpInterfaceLoadImpl,
+ AliasAnalysisOpInterfaceLoadImpl>(*getContext());
+
+ StoreOp::attachInterface<AccessGroupOpInterfaceStoreImpl,
+ AliasAnalysisOpInterfaceStoreImpl>(*getContext());
+
+ AtomicRMWOp::attachInterface<AccessGroupOpInterfaceAtomicRMWImpl,
+ AliasAnalysisOpInterfaceAtomicRMWImpl>(
+ *getContext());
}
#define GET_OP_CLASSES
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp
index cff16afc73af3f..0031924cae9dd4 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMInterfaces.cpp
@@ -66,18 +66,6 @@ SmallVector<Value> mlir::LLVM::AtomicCmpXchgOp::getAccessedOperands() {
return {getPtr()};
}
-SmallVector<Value> mlir::LLVM::AtomicRMWOp::getAccessedOperands() {
- return {getPtr()};
-}
-
-SmallVector<Value> mlir::LLVM::LoadOp::getAccessedOperands() {
- return {getAddr()};
-}
-
-SmallVector<Value> mlir::LLVM::StoreOp::getAccessedOperands() {
- return {getAddr()};
-}
-
SmallVector<Value> mlir::LLVM::MemcpyOp::getAccessedOperands() {
return {getDst(), getSrc()};
}
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
index 96a0ef591c1cfe..5f5667153d8a0d 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMMemorySlot.cpp
@@ -110,87 +110,20 @@ void LLVM::AllocaOp::handleDestructuringComplete(
// Interfaces for LoadOp/StoreOp
//===----------------------------------------------------------------------===//
-bool LLVM::LoadOp::loadsFrom(const MemorySlot &slot) {
- return getAddr() == slot.ptr;
-}
-
-bool LLVM::LoadOp::storesTo(const MemorySlot &slot) { return false; }
-
-Value LLVM::LoadOp::getStored(const MemorySlot &slot, RewriterBase &rewriter) {
- llvm_unreachable("getStored should not be called on LoadOp");
-}
-
-bool LLVM::StoreOp::loadsFrom(const MemorySlot &slot) { return false; }
-
-bool LLVM::StoreOp::storesTo(const MemorySlot &slot) {
- return getAddr() == slot.ptr;
-}
-
-Value LLVM::StoreOp::getStored(const MemorySlot &slot, RewriterBase &rewriter) {
- return getValue();
-}
-
-bool LLVM::LoadOp::canUsesBeRemoved(
- const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
- SmallVectorImpl<OpOperand *> &newBlockingUses) {
- if (blockingUses.size() != 1)
- return false;
- Value blockingUse = (*blockingUses.begin())->get();
- // If the blocking use is the slot ptr itself, there will be enough
- // context to reconstruct the result of the load at removal time, so it can
- // be removed (provided it loads the exact stored value and is not
- // volatile).
- return blockingUse == slot.ptr && getAddr() == slot.ptr &&
- getResult().getType() == slot.elemType && !getVolatile_();
-}
-
-DeletionKind LLVM::LoadOp::removeBlockingUses(
- const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
- RewriterBase &rewriter, Value reachingDefinition) {
- // `canUsesBeRemoved` checked this blocking use must be the loaded slot
- // pointer.
- rewriter.replaceAllUsesWith(getResult(), reachingDefinition);
- return DeletionKind::Delete;
-}
-
-bool LLVM::StoreOp::canUsesBeRemoved(
- const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
- SmallVectorImpl<OpOperand *> &newBlockingUses) {
- if (blockingUses.size() != 1)
- return false;
- Value blockingUse = (*blockingUses.begin())->get();
- // If the blocking use is the slot ptr itself, dropping the store is
- // fine, provided we are currently promoting its target value. Don't allow a
- // store OF the slot pointer, only INTO the slot pointer.
- return blockingUse == slot.ptr && getAddr() == slot.ptr &&
- getValue() != slot.ptr && getValue().getType() == slot.elemType &&
- !getVolatile_();
-}
-
-DeletionKind LLVM::StoreOp::removeBlockingUses(
- const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
- RewriterBase &rewriter, Value reachingDefinition) {
+DeletionKind LLVM::AddressSpaceAttr::removeStoreBlockingUses(
+ Operation *op, Value value, const MemorySlot &slot,
+ const SmallPtrSetImpl<OpOperand *> &blockingUses, RewriterBase &rewriter,
+ Value reachingDefinition) const {
// `canUsesBeRemoved` checked this blocking use must be the stored slot
// pointer.
for (Operation *user : slot.ptr.getUsers())
- if (auto declareOp = dyn_cast<LLVM::DbgDeclareOp>(user))
- rewriter.create<LLVM::DbgValueOp>(declareOp->getLoc(), getValue(),
+ if (auto declareOp = mlir::dyn_cast<LLVM::DbgDeclareOp>(user))
+ rewriter.create<LLVM::DbgValueOp>(declareOp->getLoc(), value,
declareOp.getVarInfo(),
declareOp.getLocationExpr());
return DeletionKind::Delete;
}
-LogicalResult LLVM::LoadOp::ensureOnlySafeAccesses(
- const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
- return success(getAddr() != slot.ptr || getType() == slot.elemType);
-}
-
-LogicalResult LLVM::StoreOp::ensureOnlySafeAccesses(
- const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
- return success(getAddr() != slot.ptr ||
- getValue().getType() == slot.elemType);
-}
-
//===----------------------------------------------------------------------===//
// Interfaces for discardable OPs
//===----------------------------------------------------------------------===//
@@ -215,17 +148,6 @@ DeletionKind LLVM::BitcastOp::removeBlockingUses(
return DeletionKind::Delete;
}
-bool LLVM::AddrSpaceCastOp::canUsesBeRemoved(
- const SmallPtrSetImpl<OpOperand *> &blockingUses,
- SmallVectorImpl<OpOperand *> &newBlockingUses) {
- return forwardToUsers(*this, newBlockingUses);
-}
-
-DeletionKind LLVM::AddrSpaceCastOp::removeBlockingUses(
- const SmallPtrSetImpl<OpOperand *> &blockingUses, RewriterBase &rewriter) {
- return DeletionKind::Delete;
-}
-
bool LLVM::LifetimeStartOp::canUsesBeRemoved(
const SmallPtrSetImpl<OpOperand *> &blockingUses,
SmallVectorImpl<OpOperand *> &newBlockingUses) {
diff --git a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
index 443e245887ea8e..6d0800b02ce1c4 100644
--- a/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
+++ b/mlir/lib/Dialect/LLVMIR/IR/LLVMTypes.cpp
@@ -253,148 +253,6 @@ LLVMFunctionType::verify(function_ref<InFlightDiagnostic()> emitError,
return success();
}
-//===----------------------------------------------------------------------===//
-// DataLayoutTypeInterface
-
-constexpr const static uint64_t kDefaultPointerSizeBits = 64;
-constexpr const static uint64_t kDefaultPointerAlignment = 8;
-
-std::optional<uint64_t> mlir::LLVM::extractPointerSpecValue(Attribute attr,
- PtrDLEntryPos pos) {
- auto spec = cast<DenseIntElementsAttr>(attr);
- auto idx = static_cast<int64_t>(pos);
- if (idx >= spec.size())
- return std::nullopt;
- return spec.getValues<uint64_t>()[idx];
-}
-
-/// Returns the part of the data layout entry that corresponds to `pos` for the
-/// given `type` by interpreting the list of entries `params`. For the pointer
-/// type in the default address space, returns the default value if the entries
-/// do not provide a custom one, for other address spaces returns std::nullopt.
-static std::optional<uint64_t>
-getPointerDataLayoutEntry(DataLayoutEntryListRef params, LLVMPointerType type,
- PtrDLEntryPos pos) {
- // First, look for the entry for the pointer in the current address space.
- Attribute currentEntry;
- for (DataLayoutEntryInterface entry : params) {
- if (!entry.isTypeEntry())
- continue;
- if (cast<LLVMPointerType>(entry.getKey().get<Type>()).getAddressSpace() ==
- type.getAddressSpace()) {
- currentEntry = entry.getValue();
- break;
- }
- }
- if (currentEntry) {
- return *extractPointerSpecValue(currentEntry, pos) /
- (pos == PtrDLEntryPos::Size ? 1 : kBitsInByte);
- }
-
- // If not found, and this is the pointer to the default memory space, assume
- // 64-bit pointers.
- if (type.getAddressSpace() == 0) {
- return pos == PtrDLEntryPos::Size ? kDefaultPointerSizeBits
- : kDefaultPointerAlignment;
- }
-
- return std::nullopt;
-}
-
-llvm::TypeSize
-LLVMPointerType::getTypeSizeInBits(const DataLayout &dataLayout,
- DataLayoutEntryListRef params) const {
- if (std::optional<uint64_t> size =
- getPointerDataLayoutEntry(params, *this, PtrDLEntryPos::Size))
- return llvm::TypeSize::getFixed(*size);
-
- // For other memory spaces, use the size of the pointer to the default memory
- // space.
- return dataLayout.getTypeSizeInBits(get(getContext()));
-}
-
-uint64_t LLVMPointerType::getABIAlignment(const DataLayout &dataLayout,
- DataLayoutEntryListRef params) const {
- if (std::optional<uint64_t> alignment =
- getPointerDataLayoutEntry(params, *this, PtrDLEntryPos::Abi))
- return *alignment;
-
- return dataLayout.getTypeABIAlignment(get(getContext()));
-}
-
-uint64_t
-LLVMPointerType::getPreferredAlignment(const DataLayout &dataLayout,
- DataLayoutEntryListRef params) const {
- if (std::optional<uint64_t> alignment =
- getPointerDataLayoutEntry(params, *this, PtrDLEntryPos::Preferred))
- return *alignment;
-
- return dataLayout.getTypePreferredAlignment(get(getContext()));
-}
-
-bool LLVMPointerType::areCompatible(DataLayoutEntryListRef oldLayout,
- DataLayoutEntryListRef newLayout) const {
- for (DataLayoutEntryInterface newEntry : newLayout) {
- if (!newEntry.isTypeEntry())
- continue;
- uint64_t size = kDefaultPointerSizeBits;
- uint64_t abi = kDefaultPointerAlignment;
- auto newType = llvm::cast<LLVMPointerType>(newEntry.getKey().get<Type>());
- const auto *it =
- llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
- if (auto type = llvm::dyn_cast_if_present<Type>(entry.getKey())) {
- return llvm::cast<LLVMPointerType>(type).getAddressSpace() ==
- newType.getAddressSpace();
- }
- return false;
- });
- if (it == oldLayout.end()) {
- llvm::find_if(oldLayout, [&](DataLayoutEntryInterface entry) {
- if (auto type = llvm::dyn_cast_if_present<Type>(entry.getKey())) {
- return llvm::cast<LLVMPointerType>(type).getAddressSpace() == 0;
- }
- return false;
- });
- }
- if (it != oldLayout.end()) {
- size = *extractPointerSpecValue(*it, PtrDLEntryPos::Size);
- abi = *extractPointerSpecValue(*it, PtrDLEntryPos::Abi);
- }
-
- Attribute newSpec = llvm::cast<DenseIntElementsAttr>(newEntry.getValue());
- uint64_t newSize = *extractPointerSpecValue(newSpec, PtrDLEntryPos::Size);
- uint64_t newAbi = *extractPointerSpecValue(newSpec, PtrDLEntryPos::Abi);
- if (size != newSize || abi < newAbi || abi % newAbi != 0)
- return false;
- }
- return true;
-}
-
-LogicalResult LLVMPointerType::verifyEntries(DataLayoutEntryListRef entries,
- Location loc) const {
- for (DataLayoutEntryInterface entry : entries) {
- if (!entry.isTypeEntry())
- continue;
- auto key = entry.getKey().get<Type>();
- auto values = llvm::dyn_cast<DenseIntElementsAttr>(entry.getValue());
- if (!values || (values.size() != 3 && values.size() != 4)) {
- return emitError(loc)
- << "expected layout attribute for " << key
- << " to be a dense integer elements attribute with 3 or 4 "
- "elements";
- }
- if (!values.getElementType().isInteger(64))
- return emitError(loc) << "expected i64 parameters for " << key;
-
- if (extractPointerSpecValue(values, PtrDLEntryPos::Abi) >
- extractPointerSpecValue(values, PtrDLEntryPos::Preferred)) {
- return emitError(loc) << "preferred alignment is expected to be at least "
- "as large as ABI alignment";
- }
- }
- return success();
-}
-
//===----------------------------------------------------------------------===//
// Struct type.
//===----------------------------------------------------------------------===//
@@ -760,6 +618,7 @@ bool mlir::LLVM::isCompatibleOuterType(Type type) {
LLVMMetadataType,
LLVMPPCFP128Type,
LLVMPointerType,
+ ptr::PtrType,
LLVMStructType,
LLVMTokenType,
LLVMFixedVectorType,
@@ -1024,3 +883,41 @@ Type LLVMDialect::parseType(DialectAsmParser &parser) const {
void LLVMDialect::printType(Type type, DialectAsmPrinter &os) const {
return detail::printType(type, os);
}
+
+//===----------------------------------------------------------------------===//
+// LLVMPointerType
+//===----------------------------------------------------------------------===//
+
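+// Round-trips the legacy `!llvm.ptr` syntax on top of `ptr::PtrType`, e.g.:
+//   !llvm.ptr     (default address space 0)
+//   !llvm.ptr<5>  (address space 5)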
+Type LLVMPointerType::parse(AsmParser &odsParser) {
+ FailureOr<unsigned> addressSpace;
+ // Parse literal '<'
+ if (!odsParser.parseOptionalLess()) {
+ if (failed(addressSpace = FieldParser<unsigned>::parse(odsParser))) {
+ odsParser.emitError(odsParser.getCurrentLocation(),
+                          "failed to parse LLVMPointerType parameter "
+                          "'addressSpace', expected an `unsigned` integer");
+ return {};
+ }
+ // Parse literal '>'
+ if (odsParser.parseGreater())
+ return {};
+ }
+ return LLVMPointerType::get(odsParser.getContext(), addressSpace.value_or(0));
+}
+
+void LLVMPointerType::print(AsmPrinter &odsPrinter) const {
+ if (unsigned as = getAddressSpace(); as != 0)
+ odsPrinter << "<" << as << ">";
+}
+
+ptr::PtrType LLVMPointerType::get(MLIRContext *context, unsigned addressSpace) {
+ return ptr::PtrType::get(context,
+ AddressSpaceAttr::get(context, addressSpace));
+}
+
+bool mlir::LLVM::isLLVMPointerType(Type type) {
+ if (auto ptrTy = mlir::dyn_cast<ptr::PtrType>(type))
+ return ptrTy.getMemorySpace() &&
+ mlir::isa<AddressSpaceAttr>(ptrTy.getMemorySpace());
+ return false;
+}
diff --git a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
index 45e0632db5ef2b..520de839d408a1 100644
--- a/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
+++ b/mlir/lib/Dialect/OpenACC/IR/OpenACC.cpp
@@ -37,7 +37,7 @@ struct MemRefPointerLikeModel
struct LLVMPointerPointerLikeModel
: public PointerLikeType::ExternalModel<LLVMPointerPointerLikeModel,
- LLVM::LLVMPointerType> {
+ ptr::PtrType> {
Type getElementType(Type pointer) const { return Type(); }
};
} // namespace
@@ -64,8 +64,7 @@ void OpenACCDialect::initialize() {
// the other dialects. This is probably better than having dialects like LLVM
// and memref be dependent on OpenACC.
MemRefType::attachInterface<MemRefPointerLikeModel>(*getContext());
- LLVM::LLVMPointerType::attachInterface<LLVMPointerPointerLikeModel>(
- *getContext());
+ ptr::PtrType::attachInterface<LLVMPointerPointerLikeModel>(*getContext());
}
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
index 6e69cd0d386bd2..1e57206ee07856 100644
--- a/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
+++ b/mlir/lib/Dialect/OpenMP/IR/OpenMPDialect.cpp
@@ -49,7 +49,7 @@ struct MemRefPointerLikeModel
struct LLVMPointerPointerLikeModel
: public PointerLikeType::ExternalModel<LLVMPointerPointerLikeModel,
- LLVM::LLVMPointerType> {
+ ptr::PtrType> {
Type getElementType(Type pointer) const { return Type(); }
};
@@ -79,8 +79,7 @@ void OpenMPDialect::initialize() {
addInterface<OpenMPDialectFoldInterface>();
MemRefType::attachInterface<MemRefPointerLikeModel>(*getContext());
- LLVM::LLVMPointerType::attachInterface<LLVMPointerPointerLikeModel>(
- *getContext());
+ ptr::PtrType::attachInterface<LLVMPointerPointerLikeModel>(*getContext());
// Attach default offload module interface to module op to access
// offload functionality through
diff --git a/mlir/lib/ExecutionEngine/CMakeLists.txt b/mlir/lib/ExecutionEngine/CMakeLists.txt
index 2f391b7698cbb0..1f059df687405f 100644
--- a/mlir/lib/ExecutionEngine/CMakeLists.txt
+++ b/mlir/lib/ExecutionEngine/CMakeLists.txt
@@ -93,6 +93,7 @@ add_mlir_library(MLIRExecutionEngine
MLIRLLVMDialect
MLIRLLVMToLLVMIRTranslation
MLIROpenMPToLLVMIRTranslation
+ MLIRPtrToLLVMIRTranslation
MLIRTargetLLVMIRExport
)
@@ -118,6 +119,7 @@ add_mlir_library(MLIRJitRunner
MLIRIR
MLIRParser
MLIRLLVMToLLVMIRTranslation
+ MLIRPtrToLLVMIRTranslation
MLIRTargetLLVMIRExport
MLIRTransforms
MLIRSupport
diff --git a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
index 1722d74c08b628..42103c4f7a1ae8 100644
--- a/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
+++ b/mlir/lib/Target/LLVMIR/ModuleTranslation.cpp
@@ -254,8 +254,8 @@ translateDataLayout(DataLayoutSpecInterface attribute,
uint64_t preferred =
dataLayout.getTypePreferredAlignment(type) * 8u;
layoutStream << size << ":" << abi << ":" << preferred;
- if (std::optional<uint64_t> index = extractPointerSpecValue(
- entry.getValue(), PtrDLEntryPos::Index))
+ if (std::optional<uint64_t> index = ptr::extractPointerSpecValue(
+ entry.getValue(), ptr::PtrDLEntryPos::Index))
layoutStream << ":" << *index;
return success();
})
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
index 4077edc7420dca..3544226187fa26 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir
@@ -11,7 +11,7 @@ func.func @create_token() {
func.func @create_value() {
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][1]
- // CHECK: %[[SIZE:.*]] = llvm.ptrtoint %[[OFFSET]]
+ // CHECK: %[[SIZE:.*]] = ptr.ptrtoint %[[OFFSET]]
// CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue(%[[SIZE]])
%0 = async.runtime.create : !async.value<f32>
return
@@ -152,7 +152,7 @@ func.func @store() {
// CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
%1 = async.runtime.create : !async.value<f32>
// CHECK: %[[P0:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
- // CHECK: llvm.store %[[CST]], %[[P0]] : f32, !llvm.ptr
+ // CHECK: ptr.store %[[CST]], %[[P0]] : f32, !llvm.ptr
async.runtime.store %0, %1 : !async.value<f32>
return
}
@@ -162,7 +162,7 @@ func.func @load() -> f32 {
// CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue
%0 = async.runtime.create : !async.value<f32>
// CHECK: %[[P0:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
- // CHECK: %[[VALUE:.*]] = llvm.load %[[P0]] : !llvm.ptr -> f32
+ // CHECK: %[[VALUE:.*]] = ptr.load %[[P0]] : !llvm.ptr -> f32
%1 = async.runtime.load %0 : !async.value<f32>
// CHECK: return %[[VALUE]] : f32
return %1 : f32
diff --git a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
index dd54bdb7987244..25a545827ef626 100644
--- a/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
+++ b/mlir/test/Conversion/AsyncToLLVM/convert-to-llvm.mlir
@@ -227,7 +227,7 @@ func.func @execute_and_return_f32() -> f32 {
}
// CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[RET]]#1)
- // CHECK: %[[LOADED:.*]] = llvm.load %[[STORAGE]] : !llvm.ptr -> f32
+ // CHECK: %[[LOADED:.*]] = ptr.load %[[STORAGE]] : !llvm.ptr -> f32
%0 = async.await %result : !async.value<f32>
return %0 : f32
@@ -246,7 +246,7 @@ func.func @execute_and_return_f32() -> f32 {
// Emplace result value.
// CHECK: %[[CST:.*]] = arith.constant 1.230000e+02 : f32
// CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%[[VALUE]])
-// CHECK: llvm.store %[[CST]], %[[STORAGE]] : f32, !llvm.ptr
+// CHECK: ptr.store %[[CST]], %[[STORAGE]] : f32, !llvm.ptr
// CHECK: call @mlirAsyncRuntimeEmplaceValue(%[[VALUE]])
// Emplace result token.
@@ -293,7 +293,7 @@ func.func @async_value_operands() {
// Get the operand value storage, cast to f32 and add the value.
// CHECK: %[[STORAGE:.*]] = call @mlirAsyncRuntimeGetValueStorage(%arg0)
-// CHECK: %[[LOADED:.*]] = llvm.load %[[STORAGE]] : !llvm.ptr -> f32
+// CHECK: %[[LOADED:.*]] = ptr.load %[[STORAGE]] : !llvm.ptr -> f32
// CHECK: arith.addf %[[LOADED]], %[[LOADED]] : f32
// Emplace result token.
diff --git a/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir b/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
index 7cdb89e1f72d28..8cdc8ea508d6ee 100644
--- a/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
+++ b/mlir/test/Conversion/FuncToLLVM/calling-convention.mlir
@@ -24,7 +24,7 @@ func.func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
// Allocate on stack and store to comply with C calling convention.
// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index)
// CHECK: %[[DESC0_ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: llvm.store %[[DESC07]], %[[DESC0_ALLOCA]]
+ // CHECK: ptr.store %[[DESC07]], %[[DESC0_ALLOCA]]
// Populate the descriptor for arg1.
// CHECK: %[[DESC10:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)>
@@ -35,7 +35,7 @@ func.func private @external(%arg0: memref<?x?xf32>, %arg1: memref<f32>)
// Allocate on stack and store to comply with C calling convention.
// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index)
// CHECK: %[[DESC1_ALLOCA:.*]] = llvm.alloca %[[C1]] x !llvm.struct<(ptr, ptr, i64)>
- // CHECK: llvm.store %[[DESC13]], %[[DESC1_ALLOCA]]
+ // CHECK: ptr.store %[[DESC13]], %[[DESC1_ALLOCA]]
// Call the interface function.
// CHECK: llvm.call @_mlir_ciface_external
@@ -83,7 +83,7 @@ func.func @callee(%arg0: memref<?xf32>, %arg1: index) {
// CHECK-LABEL: @_mlir_ciface_callee
// CHECK: %[[ARG0:.*]]: !llvm.ptr
// Load the memref descriptor pointer.
- // CHECK: %[[DESC:.*]] = llvm.load %[[ARG0]] : !llvm.ptr -> !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
+ // CHECK: %[[DESC:.*]] = ptr.load %[[ARG0]] : !llvm.ptr -> !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// Extract individual components of the descriptor.
// CHECK: %[[ALLOC:.*]] = llvm.extractvalue %[[DESC]][0]
@@ -262,7 +262,7 @@ func.func @bare_ptr_calling_conv(%arg0: memref<4x3xf32>, %arg1 : index, %arg2 :
// CHECK: %[[ALIGNEDPTR:.*]] = llvm.extractvalue %[[INSERT_STRIDE1]][1]
// CHECK: %[[STOREPTR:.*]] = llvm.getelementptr %[[ALIGNEDPTR]]
- // CHECK: llvm.store %{{.*}}, %[[STOREPTR]]
+ // CHECK: ptr.store %{{.*}}, %[[STOREPTR]]
memref.store %arg3, %arg0[%arg1, %arg2] : memref<4x3xf32>
// CHECK: llvm.return %[[ARG0]]
@@ -290,12 +290,12 @@ func.func @bare_ptr_calling_conv_multiresult(%arg0: memref<4x3xf32>, %arg1 : ind
// CHECK: %[[ALIGNEDPTR:.*]] = llvm.extractvalue %[[INSERT_STRIDE1]][1]
// CHECK: %[[STOREPTR:.*]] = llvm.getelementptr %[[ALIGNEDPTR]]
- // CHECK: llvm.store %{{.*}}, %[[STOREPTR]]
+ // CHECK: ptr.store %{{.*}}, %[[STOREPTR]]
memref.store %arg3, %arg0[%arg1, %arg2] : memref<4x3xf32>
// CHECK: %[[ALIGNEDPTR0:.*]] = llvm.extractvalue %[[INSERT_STRIDE1]][1]
// CHECK: %[[LOADPTR:.*]] = llvm.getelementptr %[[ALIGNEDPTR0]]
- // CHECK: %[[RETURN0:.*]] = llvm.load %[[LOADPTR]]
+ // CHECK: %[[RETURN0:.*]] = ptr.load %[[LOADPTR]]
%0 = memref.load %arg0[%arg1, %arg2] : memref<4x3xf32>
// CHECK: %[[RETURN_DESC:.*]] = llvm.mlir.undef : !llvm.struct<(f32, ptr)>
diff --git a/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir
index ae8b7aaac7fd94..d8898c9b93a244 100644
--- a/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-alloc-to-gpu-runtime-calls.mlir
@@ -7,7 +7,7 @@ module attributes {gpu.container_module} {
// CHECK: %[[stream:.*]] = llvm.call @mgpuStreamCreate()
%0 = gpu.wait async
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}}[%[[size]]]
- // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]]
+ // CHECK: %[[size_bytes:.*]] = ptr.ptrtoint %[[gep]]
// CHECK: %[[isHostShared:.*]] = llvm.mlir.constant
// CHECK: llvm.call @mgpuMemAlloc(%[[size_bytes]], %[[stream]], %[[isHostShared]])
%1, %2 = gpu.alloc async [%0] (%size) : memref<?xf32>
@@ -24,7 +24,7 @@ module attributes {gpu.container_module} {
// CHECK-SAME: %[[size:.*]]: i64
func.func @alloc_sync(%size : index) {
// CHECK: %[[gep:.*]] = llvm.getelementptr {{.*}}[%[[size]]]
- // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]]
+ // CHECK: %[[size_bytes:.*]] = ptr.ptrtoint %[[gep]]
// CHECK: %[[nullptr:.*]] = llvm.mlir.zero
// CHECK: %[[isHostShared:.*]] = llvm.mlir.constant
// CHECK: llvm.call @mgpuMemAlloc(%[[size_bytes]], %[[nullptr]], %[[isHostShared]])
diff --git a/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir
index 3f86b076982795..2985db83d9b141 100644
--- a/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-memcpy-to-gpu-runtime-calls.mlir
@@ -6,9 +6,9 @@ module attributes {gpu.container_module} {
func.func @foo(%dst : memref<7xf32, 1>, %src : memref<7xf32>) {
// CHECK: %[[t0:.*]] = llvm.call @mgpuStreamCreate
%t0 = gpu.wait async
- // CHECK: %[[size_bytes:.*]] = llvm.ptrtoint
- // CHECK-NOT: llvm.addrspacecast
- // CHECK: %[[addr_cast:.*]] = llvm.addrspacecast
+ // CHECK: %[[size_bytes:.*]] = ptr.ptrtoint
+ // CHECK-NOT: ptr.addrspacecast
+ // CHECK: %[[addr_cast:.*]] = ptr.addrspacecast
// CHECK: llvm.call @mgpuMemcpy(%[[addr_cast]], %{{.*}}, %[[size_bytes]], %[[t0]])
%t1 = gpu.memcpy async [%t0] %dst, %src : memref<7xf32, 1>, memref<7xf32>
// CHECK: llvm.call @mgpuStreamSynchronize(%[[t0]])
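The memcpy lowering keeps its shape; only the address-space cast feeding the runtime call is renamed. A one-line sketch following the format in the CHECK lines above (assumption: ptr.addrspacecast keeps llvm.addrspacecast's syntax; %src is an illustrative name):

// Cast a generic pointer to address space 1 before handing it to the runtime.
%cast = ptr.addrspacecast %src : !llvm.ptr to !llvm.ptr<1>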
diff --git a/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir b/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir
index 771f3185904bb8..eb54d99a21bcee 100644
--- a/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-memory-space-attrs.mlir
@@ -10,7 +10,7 @@ gpu.module @kernel {
}
// CHECK-LABEL: llvm.func @private
-// CHECK: llvm.store
+// CHECK: ptr.store
// ROCDL-SAME: : f32, !llvm.ptr<5>
// NVVM-SAME: : f32, !llvm.ptr
@@ -26,7 +26,7 @@ gpu.module @kernel {
}
// CHECK-LABEL: llvm.func @workgroup
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: : f32, !llvm.ptr<3>
// -----
@@ -41,9 +41,9 @@ gpu.module @kernel {
}
// CHECK-LABEL: llvm.func @nested_memref
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: : !llvm.ptr<1>
-// CHECK: [[value:%.+]] = llvm.load
+// CHECK: [[value:%.+]] = ptr.load
// CHECK-SAME: : !llvm.ptr<1> -> f32
// CHECK: llvm.return [[value]]
@@ -64,8 +64,8 @@ gpu.module @kernel {
// NVVM: llvm.mlir.global internal @__dynamic_shmem__0() {addr_space = 3 : i32, alignment = 16 : i64} : !llvm.array<0 x i8>
// CHECK-LABEL: llvm.func @dynamic_shmem_with_vector
// CHECK: llvm.mlir.addressof @__dynamic_shmem__0 : !llvm.ptr<3>
-// CHECK: llvm.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr<3> -> vector<1xf32>
-// CHECK: llvm.store
+// CHECK: ptr.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr<3> -> vector<1xf32>
+// CHECK: ptr.store
// -----
@@ -80,6 +80,6 @@ gpu.module @kernel {
}
// CHECK-LABEL: llvm.func @dynamic_shmem
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: : f32, !llvm.ptr<3>
diff --git a/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir b/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir
index aaced31813d574..a5752139b35a84 100644
--- a/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir
+++ b/mlir/test/Conversion/GPUCommon/lower-memset-to-gpu-runtime-calls.mlir
@@ -7,7 +7,7 @@ module attributes {gpu.container_module} {
// CHECK: %[[t0:.*]] = llvm.call @mgpuStreamCreate
%t0 = gpu.wait async
// CHECK: %[[size_bytes:.*]] = llvm.mlir.constant
- // CHECK: %[[addr_cast:.*]] = llvm.addrspacecast
+ // CHECK: %[[addr_cast:.*]] = ptr.addrspacecast
// CHECK: llvm.call @mgpuMemset32(%[[addr_cast]], %{{.*}}, %[[size_bytes]], %[[t0]])
%t1 = gpu.memset async [%t0] %dst, %value : memref<7xf32, 1>, f32
// CHECK: llvm.call @mgpuStreamSynchronize(%[[t0]])
diff --git a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
index 4fc19b8e93646c..be1de375d2c004 100644
--- a/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
+++ b/mlir/test/Conversion/GPUCommon/memory-attrbution.mlir
@@ -36,11 +36,11 @@ gpu.module @kernel {
// we emit some core instructions.
// NVVM: llvm.extractvalue %[[descr6:.*]]
// NVVM: llvm.getelementptr
- // NVVM: llvm.store
+ // NVVM: ptr.store
// ROCDL: llvm.extractvalue %[[descr6:.*]]
// ROCDL: llvm.getelementptr
- // ROCDL: llvm.store
+ // ROCDL: ptr.store
%c0 = arith.constant 0 : index
memref.store %arg0, %arg1[%c0] : memref<4xf32, #gpu.address_space<private>>
@@ -100,11 +100,11 @@ gpu.module @kernel {
// we emit some core instructions.
// NVVM: llvm.extractvalue %[[descr6:.*]]
// NVVM: llvm.getelementptr
- // NVVM: llvm.store
+ // NVVM: ptr.store
// ROCDL: llvm.extractvalue %[[descr6:.*]]
// ROCDL: llvm.getelementptr
- // ROCDL: llvm.store
+ // ROCDL: ptr.store
%c0 = arith.constant 0 : index
memref.store %arg0, %arg1[%c0] : memref<4xf32, #gpu.address_space<workgroup>>
diff --git a/mlir/test/Conversion/GPUCommon/transfer_write.mlir b/mlir/test/Conversion/GPUCommon/transfer_write.mlir
index cba85915b49e43..5587730641c74b 100644
--- a/mlir/test/Conversion/GPUCommon/transfer_write.mlir
+++ b/mlir/test/Conversion/GPUCommon/transfer_write.mlir
@@ -6,7 +6,7 @@
// CHECK:%[[val:[0-9]+]] = llvm.extractelement
// CHECK:%[[base:[0-9]+]] = llvm.extractvalue
// CHECK:%[[ptr:[0-9]+]] = llvm.getelementptr %[[base]]
- // CHECK:llvm.store %[[val]], %[[ptr]]
+ // CHECK:ptr.store %[[val]], %[[ptr]]
vector.transfer_write %arg3, %arg1[%c0, %c0] {in_bounds = [true]} : vector<1xf32>, memref<1024x1024xf32>
}
return
diff --git a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
index 20a200e812c125..836fe1d03de128 100644
--- a/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/GPUToNVVM/gpu-to-nvvm.mlir
@@ -564,9 +564,9 @@ gpu.module @test_module_29 {
// CHECK-NEXT: %[[O:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %[[ALLOC:.*]] = llvm.alloca %[[O]] x !llvm.struct<(i32, f64)> : (i64) -> !llvm.ptr
// CHECK-NEXT: %[[EL0:.*]] = llvm.getelementptr %[[ALLOC]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, f64)>
- // CHECK-NEXT: llvm.store %[[ARG0]], %[[EL0]] : i32, !llvm.ptr
+ // CHECK-NEXT: ptr.store %[[ARG0]], %[[EL0]] : i32, !llvm.ptr
// CHECK-NEXT: %[[EL1:.*]] = llvm.getelementptr %[[ALLOC]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, f64)>
- // CHECK-NEXT: llvm.store %[[EXT]], %[[EL1]] : f64, !llvm.ptr
+ // CHECK-NEXT: ptr.store %[[EXT]], %[[EL1]] : f64, !llvm.ptr
// CHECK-NEXT: llvm.call @vprintf(%[[FORMATSTART]], %[[ALLOC]]) : (!llvm.ptr, !llvm.ptr) -> i32
gpu.printf "Hello: %d\n" %arg0, %arg1 : i32, f32
gpu.return
@@ -653,7 +653,7 @@ module attributes {transform.with_named_sequence} {
use_bare_ptr_memref_call_conv = true,
use_opaque_pointers = true}
} {
- legal_dialects = ["llvm", "memref", "nvvm", "test"],
+ legal_dialects = ["llvm", "ptr", "memref", "nvvm", "test"],
legal_ops = ["func.func", "gpu.module", "gpu.module_end", "gpu.yield"],
illegal_dialects = ["gpu"],
illegal_ops = ["llvm.cos", "llvm.exp", "llvm.exp2", "llvm.fabs", "llvm.fceil",
diff --git a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
index fe8a36ee29a9f0..ec7adb474089eb 100644
--- a/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
+++ b/mlir/test/Conversion/GPUToVulkan/invoke-vulkan.mlir
@@ -24,7 +24,7 @@ module attributes {gpu.container_module} {
%1 = llvm.mlir.zero : !llvm.ptr
%2 = llvm.mlir.constant(1 : index) : i64
%3 = llvm.getelementptr %1[%2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- %4 = llvm.ptrtoint %3 : !llvm.ptr to i64
+ %4 = ptr.ptrtoint %3 : !llvm.ptr to i64
%5 = llvm.mul %0, %4 : i64
%6 = llvm.call @malloc(%5) : (i64) -> !llvm.ptr
%8 = llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -54,7 +54,7 @@ module attributes {gpu.container_module} {
%5 = llvm.insertvalue %arg10, %4[4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
%6 = llvm.mlir.constant(1 : index) : i64
%7 = llvm.alloca %6 x !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> : (i64) -> !llvm.ptr
- llvm.store %5, %7 : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>, !llvm.ptr
+ ptr.store %5, %7 : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>, !llvm.ptr
llvm.call @_mlir_ciface_vulkanLaunch(%arg0, %arg1, %arg2, %7) : (i64, i64, i64, !llvm.ptr) -> ()
llvm.return
}
diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
index 609fcb10b992c6..3461a991fd6d2b 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir
@@ -13,7 +13,7 @@ func.func @mixed_alloc(%arg0: index, %arg1: index) -> memref<?x42x?xf32> {
// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[st0]], %[[M]] : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
+// CHECK-NEXT: %[[sz_bytes:.*]] = ptr.ptrtoint %[[gep]] : !llvm.ptr to i64
// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>
@@ -45,7 +45,7 @@ func.func @mixed_dealloc(%arg0: memref<?x42x?xf32>) {
// CHECK-LABEL: func @unranked_dealloc
func.func @unranked_dealloc(%arg0: memref<*xf32>) {
// CHECK: %[[memref:.*]] = llvm.extractvalue %{{.*}} : !llvm.struct<(i64, ptr)>
-// CHECK: %[[ptr:.*]] = llvm.load %[[memref]]
+// CHECK: %[[ptr:.*]] = ptr.load %[[memref]]
// CHECK-NEXT: llvm.call @free(%[[ptr]])
memref.dealloc %arg0 : memref<*xf32>
return
@@ -62,7 +62,7 @@ func.func @dynamic_alloc(%arg0: index, %arg1: index) -> memref<?x?xf32> {
// CHECK-NEXT: %[[sz:.*]] = llvm.mul %[[N]], %[[M]] : i64
// CHECK-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK-NEXT: %[[sz_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
+// CHECK-NEXT: %[[sz_bytes:.*]] = ptr.ptrtoint %[[gep]] : !llvm.ptr to i64
// CHECK-NEXT: llvm.call @malloc(%[[sz_bytes]]) : (i64) -> !llvm.ptr
// CHECK-NEXT: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: llvm.insertvalue %{{.*}}, %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -130,7 +130,7 @@ func.func @stdlib_aligned_alloc(%N : index) -> memref<32x18xf32> {
// ALIGNED-ALLOC-NEXT: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
// ALIGNED-ALLOC-NEXT: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
// ALIGNED-ALLOC-NEXT: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
+// ALIGNED-ALLOC-NEXT: %[[bytes:.*]] = ptr.ptrtoint %[[gep]] : !llvm.ptr to i64
// ALIGNED-ALLOC-NEXT: %[[alignment:.*]] = llvm.mlir.constant(32 : index) : i64
// ALIGNED-ALLOC-NEXT: %[[allocated:.*]] = llvm.call @aligned_alloc(%[[alignment]], %[[bytes]]) : (i64, i64) -> !llvm.ptr
%0 = memref.alloc() {alignment = 32} : memref<32x18xf32>
@@ -178,7 +178,7 @@ func.func @mixed_load(%mixed : memref<42x?xf32>, %i : index, %j : index) {
// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr -> f32
+// CHECK-NEXT: ptr.load %[[addr]] : !llvm.ptr -> f32
%0 = memref.load %mixed[%i, %j] : memref<42x?xf32>
return
}
@@ -195,7 +195,7 @@ func.func @dynamic_load(%dynamic : memref<?x?xf32>, %i : index, %j : index) {
// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK-NEXT: llvm.load %[[addr]] : !llvm.ptr -> f32
+// CHECK-NEXT: ptr.load %[[addr]] : !llvm.ptr -> f32
%0 = memref.load %dynamic[%i, %j] : memref<?x?xf32>
return
}
@@ -233,7 +233,7 @@ func.func @dynamic_store(%dynamic : memref<?x?xf32>, %i : index, %j : index, %va
// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : f32, !llvm.ptr
+// CHECK-NEXT: ptr.store %{{.*}}, %[[addr]] : f32, !llvm.ptr
memref.store %val, %dynamic[%i, %j] : memref<?x?xf32>
return
}
@@ -250,7 +250,7 @@ func.func @mixed_store(%mixed : memref<42x?xf32>, %i : index, %j : index, %val :
// CHECK-NEXT: %[[offI:.*]] = llvm.mul %[[I]], %[[st0]] : i64
// CHECK-NEXT: %[[off1:.*]] = llvm.add %[[offI]], %[[J]] : i64
// CHECK-NEXT: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK-NEXT: llvm.store %{{.*}}, %[[addr]] : f32, !llvm.ptr
+// CHECK-NEXT: ptr.store %{{.*}}, %[[addr]] : f32, !llvm.ptr
memref.store %val, %mixed[%i, %j] : memref<42x?xf32>
return
}
@@ -289,14 +289,14 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<
// CHECK: llvm.insertvalue [[RESULT_DESC]], [[RESULT_1]][1]
// Cast pointers
-// CHECK: [[SOURCE_ALLOC:%.*]] = llvm.load [[SOURCE_DESC]]
+// CHECK: [[SOURCE_ALLOC:%.*]] = ptr.load [[SOURCE_DESC]]
// CHECK: [[SOURCE_ALIGN_GEP:%.*]] = llvm.getelementptr [[SOURCE_DESC]][1]
-// CHECK: [[SOURCE_ALIGN:%.*]] = llvm.load [[SOURCE_ALIGN_GEP]] : !llvm.ptr
-// CHECK: [[RESULT_ALLOC:%.*]] = llvm.addrspacecast [[SOURCE_ALLOC]] : !llvm.ptr to !llvm.ptr<1>
-// CHECK: [[RESULT_ALIGN:%.*]] = llvm.addrspacecast [[SOURCE_ALIGN]] : !llvm.ptr to !llvm.ptr<1>
-// CHECK: llvm.store [[RESULT_ALLOC]], [[RESULT_DESC]] : !llvm.ptr
+// CHECK: [[SOURCE_ALIGN:%.*]] = ptr.load [[SOURCE_ALIGN_GEP]] : !llvm.ptr
+// CHECK: [[RESULT_ALLOC:%.*]] = ptr.addrspacecast [[SOURCE_ALLOC]] : !llvm.ptr to !llvm.ptr<1>
+// CHECK: [[RESULT_ALIGN:%.*]] = ptr.addrspacecast [[SOURCE_ALIGN]] : !llvm.ptr to !llvm.ptr<1>
+// CHECK: ptr.store [[RESULT_ALLOC]], [[RESULT_DESC]] : !llvm.ptr
// CHECK: [[RESULT_ALIGN_GEP:%.*]] = llvm.getelementptr [[RESULT_DESC]][1]
-// CHECK: llvm.store [[RESULT_ALIGN]], [[RESULT_ALIGN_GEP]] : !llvm.ptr
+// CHECK: ptr.store [[RESULT_ALIGN]], [[RESULT_ALIGN_GEP]] : !llvm.ptr
// Memcpy remaining values
@@ -376,14 +376,14 @@ func.func @memref_cast_mixed_to_mixed(%mixed : memref<42x?xf32>) {
func.func @memref_cast_ranked_to_unranked(%arg : memref<42x2x?xf32>) {
// CHECK-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)> : (i64) -> !llvm.ptr
-// CHECK-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>, !llvm.ptr
+// CHECK-DAG: ptr.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i64, array<3 x i64>, array<3 x i64>)>, !llvm.ptr
// CHECK-DAG: %[[r:.*]] = llvm.mlir.constant(3 : index) : i64
// CHECK : llvm.mlir.undef : !llvm.struct<(i64, ptr)>
// CHECK-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i64, ptr)>
// CHECK-DAG: llvm.insertvalue %[[p]], %{{.*}}[1] : !llvm.struct<(i64, ptr)>
// CHECK32-DAG: %[[c:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK32-DAG: %[[p:.*]] = llvm.alloca %[[c]] x !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)> : (i64) -> !llvm.ptr
-// CHECK32-DAG: llvm.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)>, !llvm.ptr
+// CHECK32-DAG: ptr.store %{{.*}}, %[[p]] : !llvm.struct<(ptr, ptr, i32, array<3 x i32>, array<3 x i32>)>, !llvm.ptr
// CHECK32-DAG: %[[r:.*]] = llvm.mlir.constant(3 : index) : i32
// CHECK32 : llvm.mlir.undef : !llvm.struct<(i32, ptr)>
// CHECK32-DAG: llvm.insertvalue %[[r]], %{{.*}}[0] : !llvm.struct<(i32, ptr)>
@@ -432,9 +432,9 @@ func.func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> in
// CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK-DAG: %[[SIZES:.*]] = llvm.extractvalue %{{.*}}[3] : ![[DESCR_TY:.*]]
// CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (i64) -> !llvm.ptr
- // CHECK-DAG: llvm.store %[[SIZES]], %[[SIZES_PTR]] : !llvm.array<2 x i64>, !llvm.ptr
+ // CHECK-DAG: ptr.store %[[SIZES]], %[[SIZES_PTR]] : !llvm.array<2 x i64>, !llvm.ptr
// CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][0, %[[IDX]]] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<2 x i64>
- // CHECK-DAG: %[[RESULT:.*]] = llvm.load %[[RESULT_PTR]] : !llvm.ptr -> i64
+ // CHECK-DAG: %[[RESULT:.*]] = ptr.load %[[RESULT_PTR]] : !llvm.ptr -> i64
%result = memref.dim %arg, %idx : memref<3x?xf32>
return %result : index
}
@@ -492,10 +492,10 @@ func.func @memref_reinterpret_cast_unranked_to_dynamic_shape(%offset: index,
// CHECK-DAG: [[INPUT:%.*]] = builtin.unrealized_conversion_cast
// CHECK: [[OUT_0:%.*]] = llvm.mlir.undef : [[TY:!.*]]
// CHECK: [[DESCRIPTOR:%.*]] = llvm.extractvalue [[INPUT]][1] : !llvm.struct<(i64, ptr)>
-// CHECK: [[BASE_PTR:%.*]] = llvm.load [[DESCRIPTOR]] : !llvm.ptr -> !llvm.ptr
+// CHECK: [[BASE_PTR:%.*]] = ptr.load [[DESCRIPTOR]] : !llvm.ptr -> !llvm.ptr
// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[DESCRIPTOR]]{{\[}}1]
// CHECK-SAME: : (!llvm.ptr) -> !llvm.ptr, !llvm.ptr
-// CHECK: [[ALIGNED_PTR:%.*]] = llvm.load [[ALIGNED_PTR_PTR]] : !llvm.ptr -> !llvm.ptr
+// CHECK: [[ALIGNED_PTR:%.*]] = ptr.load [[ALIGNED_PTR_PTR]] : !llvm.ptr -> !llvm.ptr
// CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]]
// CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]]
// CHECK: [[OUT_3:%.*]] = llvm.insertvalue [[OFFSET]], [[OUT_2]][2] : [[TY]]
@@ -532,11 +532,11 @@ func.func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
// CHECK: [[ALLOC_PTR:%.*]] = llvm.extractvalue [[INPUT]][0] : [[INPUT_TY]]
// CHECK: [[ALIGN_PTR:%.*]] = llvm.extractvalue [[INPUT]][1] : [[INPUT_TY]]
// CHECK: [[OFFSET:%.*]] = llvm.extractvalue [[INPUT]][2] : [[INPUT_TY]]
-// CHECK: llvm.store [[ALLOC_PTR]], [[UNDERLYING_DESC]] : !llvm.ptr, !llvm.ptr
+// CHECK: ptr.store [[ALLOC_PTR]], [[UNDERLYING_DESC]] : !llvm.ptr, !llvm.ptr
// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[UNDERLYING_DESC]]{{\[}}1]
-// CHECK: llvm.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr, !llvm.ptr
+// CHECK: ptr.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr, !llvm.ptr
// CHECK: [[OFFSET_PTR:%.*]] = llvm.getelementptr [[UNDERLYING_DESC]]{{\[}}2]
-// CHECK: llvm.store [[OFFSET]], [[OFFSET_PTR]] : i64, !llvm.ptr
+// CHECK: ptr.store [[OFFSET]], [[OFFSET_PTR]] : i64, !llvm.ptr
// Iterate over shape operand in reverse order and set sizes and strides.
// CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[UNDERLYING_DESC]]{{\[}}0, 3]
@@ -553,11 +553,11 @@ func.func @memref_reshape(%input : memref<2x3xf32>, %shape : memref<?xindex>) {
// CHECK: ^bb2:
// CHECK: [[SIZE_PTR:%.*]] = llvm.getelementptr [[SHAPE_IN_PTR]]{{\[}}[[DIM]]]
-// CHECK: [[SIZE:%.*]] = llvm.load [[SIZE_PTR]] : !llvm.ptr -> i64
+// CHECK: [[SIZE:%.*]] = ptr.load [[SIZE_PTR]] : !llvm.ptr -> i64
// CHECK: [[TARGET_SIZE_PTR:%.*]] = llvm.getelementptr [[SIZES_PTR]]{{\[}}[[DIM]]]
-// CHECK: llvm.store [[SIZE]], [[TARGET_SIZE_PTR]] : i64, !llvm.ptr
+// CHECK: ptr.store [[SIZE]], [[TARGET_SIZE_PTR]] : i64, !llvm.ptr
// CHECK: [[TARGET_STRIDE_PTR:%.*]] = llvm.getelementptr [[STRIDES_PTR]]{{\[}}[[DIM]]]
-// CHECK: llvm.store [[CUR_STRIDE]], [[TARGET_STRIDE_PTR]] : i64, !llvm.ptr
+// CHECK: ptr.store [[CUR_STRIDE]], [[TARGET_STRIDE_PTR]] : i64, !llvm.ptr
// CHECK: [[UPDATE_STRIDE:%.*]] = llvm.mul [[CUR_STRIDE]], [[SIZE]] : i64
// CHECK: [[STRIDE_COND:%.*]] = llvm.sub [[DIM]], [[C1_]] : i64
// CHECK: llvm.br ^bb1([[STRIDE_COND]], [[UPDATE_STRIDE]] : i64, i64)
@@ -572,7 +572,7 @@ func.func @memref_of_memref() {
// Sizeof computation is as usual.
// ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero
// ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
- // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint
+ // ALIGNED-ALLOC: %[[SIZEOF:.*]] = ptr.ptrtoint
// Static alignment should be computed as ceilPowerOf2(2 * sizeof(pointer) +
// (1 + 2 * rank) * sizeof(index) = ceilPowerOf2(2 * 8 + 3 * 8) = 64.
@@ -594,7 +594,7 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
// Sizeof computation is as usual.
// ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero
// ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
- // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint
+ // ALIGNED-ALLOC: %[[SIZEOF:.*]] = ptr.ptrtoint
// Static alignment should be computed as ceilPowerOf2(2 * sizeof(pointer) +
// (1 + 2 * rank) * sizeof(index) = ceilPowerOf2(2 * 8 + 3 * 4) = 32.
@@ -617,7 +617,7 @@ func.func @memref_of_memref_of_memref() {
// Sizeof computation is as usual, also check the type.
// ALIGNED-ALLOC: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// ALIGNED-ALLOC: %[[PTR:.*]] = llvm.getelementptr
- // ALIGNED-ALLOC: %[[SIZEOF:.*]] = llvm.ptrtoint
+ // ALIGNED-ALLOC: %[[SIZEOF:.*]] = ptr.ptrtoint
// Static alignment should be computed as ceilPowerOf2(2 * sizeof(pointer) +
// (1 + 2 * rank) * sizeof(index) = ceilPowerOf2(2 * 8 + 3 * 8) = 64.
@@ -634,7 +634,7 @@ func.func @ranked_unranked() {
// ALIGNED-ALLOC: llvm.mlir.zero
// ALIGNED-ALLOC-SAME: !llvm.ptr
// ALIGNED-ALLOC: llvm.getelementptr
- // ALIGNED-ALLOC: llvm.ptrtoint
+ // ALIGNED-ALLOC: ptr.ptrtoint
// Static alignment should be computed as ceilPowerOf2(sizeof(index) +
// sizeof(pointer)) = 16.
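All of these allocation tests rely on the same target-independent sizeof idiom: offset a null pointer by the element count with getelementptr, then convert the resulting address to an integer. With the renamed cast it reads as follows (a sketch assembled from the CHECK lines above, not itself part of the diff):

// size_in_bytes = %num_elems * sizeof(f32), with no hard-coded element size.
%null = llvm.mlir.zero : !llvm.ptr
%gep  = llvm.getelementptr %null[%num_elems] : (!llvm.ptr, i64) -> !llvm.ptr, f32
%size = ptr.ptrtoint %gep : !llvm.ptr to i64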
diff --git a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
index f1600d43e7bfb3..414a34580371eb 100644
--- a/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/convert-static-memref-ops.mlir
@@ -5,7 +5,7 @@ func.func @zero_d_alloc() -> memref<f32> {
// CHECK: %[[one:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[one]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
+// CHECK: %[[size_bytes:.*]] = ptr.ptrtoint %[[gep]] : !llvm.ptr to i64
// CHECK: %[[ptr:.*]] = llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)>
// CHECK: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64)>
@@ -38,17 +38,17 @@ func.func @aligned_1d_alloc() -> memref<42xf32> {
// CHECK: %[[st1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[sz1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
+// CHECK: %[[size_bytes:.*]] = ptr.ptrtoint %[[gep]] : !llvm.ptr to i64
// CHECK: %[[alignment:.*]] = llvm.mlir.constant(8 : index) : i64
// CHECK: %[[allocsize:.*]] = llvm.add %[[size_bytes]], %[[alignment]] : i64
// CHECK: %[[ptr:.*]] = llvm.call @malloc(%[[allocsize]]) : (i64) -> !llvm.ptr
-// CHECK: %[[allocatedAsInt:.*]] = llvm.ptrtoint %[[ptr]] : !llvm.ptr to i64
+// CHECK: %[[allocatedAsInt:.*]] = ptr.ptrtoint %[[ptr]] : !llvm.ptr to i64
// CHECK: %[[one_1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[bump:.*]] = llvm.sub %[[alignment]], %[[one_1]] : i64
// CHECK: %[[bumped:.*]] = llvm.add %[[allocatedAsInt]], %[[bump]] : i64
// CHECK: %[[mod:.*]] = llvm.urem %[[bumped]], %[[alignment]] : i64
// CHECK: %[[aligned:.*]] = llvm.sub %[[bumped]], %[[mod]] : i64
-// CHECK: %[[alignedBitCast:.*]] = llvm.inttoptr %[[aligned]] : i64 to !llvm.ptr
+// CHECK: %[[alignedBitCast:.*]] = ptr.inttoptr %[[aligned]] : i64 to !llvm.ptr
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[ptr]], %{{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[alignedBitCast]], %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -65,7 +65,7 @@ func.func @static_alloc() -> memref<32x18xf32> {
// CHECK: %[[num_elems:.*]] = llvm.mlir.constant(576 : index) : i64
// CHECK: %[[null:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[gep:.*]] = llvm.getelementptr %[[null]][%[[num_elems]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: %[[size_bytes:.*]] = llvm.ptrtoint %[[gep]] : !llvm.ptr to i64
+// CHECK: %[[size_bytes:.*]] = ptr.ptrtoint %[[gep]] : !llvm.ptr to i64
// CHECK: llvm.call @malloc(%[[size_bytes]]) : (i64) -> !llvm.ptr
%0 = memref.alloc() : memref<32x18xf32>
return %0 : memref<32x18xf32>
@@ -108,7 +108,7 @@ func.func @static_dealloc(%static: memref<10x8xf32>) {
// CHECK-LABEL: func @zero_d_load
func.func @zero_d_load(%arg0: memref<f32>) -> f32 {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64)>
-// CHECK: %{{.*}} = llvm.load %[[ptr]] : !llvm.ptr -> f32
+// CHECK: %{{.*}} = ptr.load %[[ptr]] : !llvm.ptr -> f32
%0 = memref.load %arg0[] : memref<f32>
return %0 : f32
}
@@ -127,7 +127,7 @@ func.func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
// CHECK: %[[offI:.*]] = llvm.mul %[[II]], %[[st0]] : i64
// CHECK: %[[off1:.*]] = llvm.add %[[offI]], %[[JJ]] : i64
// CHECK: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.load %[[addr]] : !llvm.ptr -> f32
+// CHECK: ptr.load %[[addr]] : !llvm.ptr -> f32
%0 = memref.load %static[%i, %j] : memref<10x42xf32>
return
}
@@ -137,7 +137,7 @@ func.func @static_load(%static : memref<10x42xf32>, %i : index, %j : index) {
// CHECK-LABEL: func @zero_d_store
func.func @zero_d_store(%arg0: memref<f32>, %arg1: f32) {
// CHECK: %[[ptr:.*]] = llvm.extractvalue %[[ld:.*]][1] : !llvm.struct<(ptr, ptr, i64)>
-// CHECK: llvm.store %{{.*}}, %[[ptr]] : f32, !llvm.ptr
+// CHECK: ptr.store %{{.*}}, %[[ptr]] : f32, !llvm.ptr
memref.store %arg1, %arg0[] : memref<f32>
return
}
@@ -155,7 +155,7 @@ func.func @static_store(%static : memref<10x42xf32>, %i : index, %j : index, %va
// CHECK: %[[offI:.*]] = llvm.mul %[[II]], %[[st0]] : i64
// CHECK: %[[off1:.*]] = llvm.add %[[offI]], %[[JJ]] : i64
// CHECK: %[[addr:.*]] = llvm.getelementptr %[[ptr]][%[[off1]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.store %{{.*}}, %[[addr]] : f32, !llvm.ptr
+// CHECK: ptr.store %{{.*}}, %[[addr]] : f32, !llvm.ptr
memref.store %val, %static[%i, %j] : memref<10x42xf32>
return
@@ -190,7 +190,7 @@ func.func @static_out_of_bound_memref_dim(%static : memref<42x32x15x13x27xf32>)
// CHECK: %[[C_MINUS_7:.*]] = arith.constant -7 : index
// CHECK: %[[C_MINUS_7_I64:.*]] = builtin.unrealized_conversion_cast %[[C_MINUS_7]] : index to i64
// CHECK: %[[UB_IDX:.*]] = llvm.getelementptr %{{.*}}[0, %[[C_MINUS_7_I64]]] : (!llvm.ptr, i64) -> !llvm.ptr
-// CHECK: %[[UB_DIM_I64:.*]] = llvm.load %[[UB_IDX]] : !llvm.ptr
+// CHECK: %[[UB_DIM_I64:.*]] = ptr.load %[[UB_IDX]] : !llvm.ptr
// CHECK: %[[UB_DIM:.*]] = builtin.unrealized_conversion_cast %[[UB_DIM_I64]] : i64 to index
// CHECK: return %[[UB_DIM]] : index
%c-7 = arith.constant -7 : index
@@ -209,16 +209,16 @@ module attributes { dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<index, 32>> } {
// CHECK: %[[CST:.*]] = builtin.unrealized_conversion_cast
// CHECK: llvm.mlir.zero
// CHECK: llvm.getelementptr %{{.*}}[[CST]]
- // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr to i32
- // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr to i32
+ // CHECK: ptr.ptrtoint %{{.*}} : !llvm.ptr to i32
+ // CHECK: ptr.ptrtoint %{{.*}} : !llvm.ptr to i32
// CHECK: llvm.add %{{.*}} : i32
// CHECK: llvm.call @malloc(%{{.*}}) : (i32) -> !llvm.ptr
- // CHECK: llvm.ptrtoint %{{.*}} : !llvm.ptr to i32
+ // CHECK: ptr.ptrtoint %{{.*}} : !llvm.ptr to i32
// CHECK: llvm.sub {{.*}} : i32
// CHECK: llvm.add {{.*}} : i32
// CHECK: llvm.urem {{.*}} : i32
// CHECK: llvm.sub {{.*}} : i32
- // CHECK: llvm.inttoptr %{{.*}} : i32 to !llvm.ptr
+ // CHECK: ptr.inttoptr %{{.*}} : i32 to !llvm.ptr
return
}
}
@@ -294,7 +294,7 @@ func.func @memref.reshape.dynamic.dim(%arg: memref<?x?x?xf32>, %shape: memref<4x
// CHECK: %[[one1:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[shape_ptr0:.*]] = llvm.extractvalue %[[shape_cast]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[shape_gep0:.*]] = llvm.getelementptr %[[shape_ptr0]][%[[one1]]] : (!llvm.ptr, i64) -> !llvm.ptr, i64
- // CHECK: %[[shape_load0:.*]] = llvm.load %[[shape_gep0]] : !llvm.ptr -> i64
+ // CHECK: %[[shape_load0:.*]] = ptr.load %[[shape_gep0]] : !llvm.ptr -> i64
// CHECK: %[[insert7:.*]] = llvm.insertvalue %[[shape_load0]], %[[insert6]][3, 1] : !llvm.struct<(ptr, ptr, i64, array<4 x i64>, array<4 x i64>)>
// CHECK: %[[insert8:.*]] = llvm.insertvalue %[[three_hundred_and_eighty_four]], %[[insert7]][4, 1] : !llvm.struct<(ptr, ptr, i64, array<4 x i64>, array<4 x i64>)>
@@ -302,7 +302,7 @@ func.func @memref.reshape.dynamic.dim(%arg: memref<?x?x?xf32>, %shape: memref<4x
// CHECK: %[[zero1:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: %[[shape_ptr1:.*]] = llvm.extractvalue %[[shape_cast]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[shape_gep1:.*]] = llvm.getelementptr %[[shape_ptr1]][%[[zero1]]] : (!llvm.ptr, i64) -> !llvm.ptr, i64
- // CHECK: %[[shape_load1:.*]] = llvm.load %[[shape_gep1]] : !llvm.ptr -> i64
+ // CHECK: %[[shape_load1:.*]] = ptr.load %[[shape_gep1]] : !llvm.ptr -> i64
// CHECK: %[[insert9:.*]] = llvm.insertvalue %[[shape_load1]], %[[insert8]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<4 x i64>, array<4 x i64>)>
// CHECK: %[[insert10:.*]] = llvm.insertvalue %[[mul]], %[[insert9]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<4 x i64>, array<4 x i64>)>
@@ -334,7 +334,7 @@ func.func @memref.reshape_index(%arg0: memref<?x?xi32>, %shape: memref<1xindex>)
// CHECK: %[[shape_ptr0:.*]] = llvm.extractvalue %[[shape_cast:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[shape_gep0:.*]] = llvm.getelementptr %[[shape_ptr0:.*]][%[[zero1:.*]]] : (!llvm.ptr, i64) -> !llvm.ptr, i64
- // CHECK: %[[shape_load0:.*]] = llvm.load %[[shape_gep0:.*]] : !llvm.ptr -> i64
+ // CHECK: %[[shape_load0:.*]] = ptr.load %[[shape_gep0:.*]] : !llvm.ptr -> i64
// CHECK: %[[insert3:.*]] = llvm.insertvalue %[[shape_load0:.*]], %[[insert2:.*]][3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: %[[insert4:.*]] = llvm.insertvalue %[[one0:.*]], %[[insert3:.*]][4, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -356,8 +356,8 @@ func.func @memref_memory_space_cast(%input : memref<?xf32>) -> memref<?xf32, 1>
// CHECK: [[OFFSET:%.*]] = llvm.extractvalue [[INPUT]][2]
// CHECK: [[SIZE:%.*]] = llvm.extractvalue [[INPUT]][3, 0]
// CHECK: [[STRIDE:%.*]] = llvm.extractvalue [[INPUT]][4, 0]
-// CHECK: [[CAST_ALLOC:%.*]] = llvm.addrspacecast [[ALLOC]] : !llvm.ptr to !llvm.ptr<1>
-// CHECK: [[CAST_ALIGN:%.*]] = llvm.addrspacecast [[ALIGN]] : !llvm.ptr to !llvm.ptr<1>
+// CHECK: [[CAST_ALLOC:%.*]] = ptr.addrspacecast [[ALLOC]] : !llvm.ptr to !llvm.ptr<1>
+// CHECK: [[CAST_ALIGN:%.*]] = ptr.addrspacecast [[ALIGN]] : !llvm.ptr to !llvm.ptr<1>
// CHECK: [[RESULT_0:%.*]] = llvm.mlir.undef
// CHECK: [[RESULT_1:%.*]] = llvm.insertvalue [[CAST_ALLOC]], [[RESULT_0]][0]
// CHECK: [[RESULT_2:%.*]] = llvm.insertvalue [[CAST_ALIGN]], [[RESULT_1]][1]
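The aligned_1d_alloc expectations above encode the usual align-up computation, aligned = bumped - (bumped mod alignment) with bumped = addr + (alignment - 1). With the renamed integer/pointer casts the core sequence looks like this (a sketch; %raw, %bump_amount, and %alignment are illustrative names, not taken from the test):

%addr    = ptr.ptrtoint %raw : !llvm.ptr to i64
%bumped  = llvm.add %addr, %bump_amount : i64  // %bump_amount = alignment - 1
%mod     = llvm.urem %bumped, %alignment : i64
%aligned = llvm.sub %bumped, %mod : i64
%aptr    = ptr.inttoptr %aligned : i64 to !llvm.ptr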
diff --git a/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir
index eb45112b117c0d..139b8b15d56874 100644
--- a/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/expand-then-convert-to-llvm.mlir
@@ -682,12 +682,12 @@ func.func @collapse_static_shape_with_non_identity_layout(%arg: memref<1x1x8x8xf
// CHECK: %[[ALIGNED_PTR:.*]] = llvm.extractvalue %[[DESC]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[OFFSET:.*]] = llvm.extractvalue %[[DESC]][2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: %[[BUFF_ADDR:.*]] = llvm.getelementptr %[[ALIGNED_PTR]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: %[[INT_TO_PTR:.*]] = llvm.ptrtoint %[[BUFF_ADDR]] : !llvm.ptr to i64
+// CHECK: %[[INT_TO_PTR:.*]] = ptr.ptrtoint %[[BUFF_ADDR]] : !llvm.ptr to i64
// CHECK: %[[AND:.*]] = llvm.and %[[INT_TO_PTR]], {{.*}} : i64
// CHECK: %[[CMP:.*]] = llvm.icmp "eq" %[[AND]], {{.*}} : i64
// CHECK: "llvm.intr.assume"(%[[CMP]]) : (i1) -> ()
// CHECK: %[[LD_ADDR:.*]] = llvm.getelementptr %[[BUFF_ADDR]][%{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: %[[VAL:.*]] = llvm.load %[[LD_ADDR]] : !llvm.ptr -> f32
+// CHECK: %[[VAL:.*]] = ptr.load %[[LD_ADDR]] : !llvm.ptr -> f32
// CHECK: return %[[VAL]] : f32
func.func @load_and_assume(
%arg0: memref<?x?xf32, strided<[?, ?], offset: ?>>,
diff --git a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
index 37999d6fc14ad1..12b36d1b61986f 100644
--- a/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
+++ b/mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir
@@ -157,7 +157,7 @@ func.func @assume_alignment(%0 : memref<4x4xf16>) {
// CHECK: %[[PTR:.*]] = llvm.extractvalue %[[MEMREF:.*]][1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK-NEXT: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-NEXT: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : i64
- // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[PTR]] : !llvm.ptr to i64
+ // CHECK-NEXT: %[[INT:.*]] = ptr.ptrtoint %[[PTR]] : !llvm.ptr to i64
// CHECK-NEXT: %[[MASKED_PTR:.*]] = llvm.and %[[INT]], %[[MASK:.*]] : i64
// CHECK-NEXT: %[[CONDITION:.*]] = llvm.icmp "eq" %[[MASKED_PTR]], %[[ZERO]] : i64
// CHECK-NEXT: "llvm.intr.assume"(%[[CONDITION]]) : (i1) -> ()
@@ -174,7 +174,7 @@ func.func @assume_alignment_w_offset(%0 : memref<4x4xf16, strided<[?, ?], offset
// CHECK-DAG: %[[BUFF_ADDR:.*]] = llvm.getelementptr %[[PTR]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, f16
// CHECK-DAG: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK-DAG: %[[MASK:.*]] = llvm.mlir.constant(15 : index) : i64
- // CHECK-NEXT: %[[INT:.*]] = llvm.ptrtoint %[[BUFF_ADDR]] : !llvm.ptr to i64
+ // CHECK-NEXT: %[[INT:.*]] = ptr.ptrtoint %[[BUFF_ADDR]] : !llvm.ptr to i64
// CHECK-NEXT: %[[MASKED_PTR:.*]] = llvm.and %[[INT]], %[[MASK:.*]] : i64
// CHECK-NEXT: %[[CONDITION:.*]] = llvm.icmp "eq" %[[MASKED_PTR]], %[[ZERO]] : i64
// CHECK-NEXT: "llvm.intr.assume"(%[[CONDITION]]) : (i1) -> ()
@@ -204,21 +204,21 @@ func.func @dim_of_unranked(%unranked: memref<*xi32>) -> index {
// CHECK: %[[SIZE_PTR:.*]] = llvm.getelementptr %[[OFFSET_PTR]]{{\[}}
// CHECK-SAME: %[[INDEX_INC]]] : (!llvm.ptr, i64) -> !llvm.ptr
-// CHECK: %[[SIZE:.*]] = llvm.load %[[SIZE_PTR]] : !llvm.ptr -> i64
+// CHECK: %[[SIZE:.*]] = ptr.load %[[SIZE_PTR]] : !llvm.ptr -> i64
-// CHECK32: %[[SIZE:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i32
+// CHECK32: %[[SIZE:.*]] = ptr.load %{{.*}} : !llvm.ptr -> i32
// -----
// CHECK-LABEL: func @address_space(
func.func @address_space(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>, 7>) {
// CHECK: %[[MEMORY:.*]] = llvm.call @malloc(%{{.*}})
- // CHECK: %[[CAST:.*]] = llvm.addrspacecast %[[MEMORY]] : !llvm.ptr to !llvm.ptr<5>
+ // CHECK: %[[CAST:.*]] = ptr.addrspacecast %[[MEMORY]] : !llvm.ptr to !llvm.ptr<5>
// CHECK: llvm.insertvalue %[[CAST]], %{{[[:alnum:]]+}}[0]
// CHECK: llvm.insertvalue %[[CAST]], %{{[[:alnum:]]+}}[1]
%0 = memref.alloc() : memref<32xf32, affine_map<(d0) -> (d0)>, 5>
%1 = arith.constant 7 : index
- // CHECK: llvm.load %{{.*}} : !llvm.ptr<5> -> f32
+ // CHECK: ptr.load %{{.*}} : !llvm.ptr<5> -> f32
%2 = memref.load %0[%1] : memref<32xf32, affine_map<(d0) -> (d0)>, 5>
func.return
}
@@ -270,7 +270,7 @@ func.func @get_gv0_memref() {
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv0 : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x f32>
// CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
- // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
+ // CHECK: %[[DEADBEEFPTR:.*]] = ptr.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -290,7 +290,7 @@ func.func @get_gv2_memref() {
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv2 : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0, 0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x array<3 x f32>>
// CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
- // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
+ // CHECK: %[[DEADBEEFPTR:.*]] = ptr.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -314,7 +314,7 @@ func.func @get_gv3_memref() {
// CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0] : (!llvm.ptr) -> !llvm.ptr, f32
// CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64
- // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
+ // CHECK: %[[DEADBEEFPTR:.*]] = ptr.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr
// CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)>
// CHECK: llvm.insertvalue %[[DEADBEEFPTR]], {{.*}}[0] : !llvm.struct<(ptr, ptr, i64)>
// CHECK: llvm.insertvalue %[[GEP]], {{.*}}[1] : !llvm.struct<(ptr, ptr, i64)>
@@ -378,23 +378,23 @@ func.func @rank_of_ranked(%ranked: memref<?xi32>) {
// CHECK-LABEL: func @atomic_rmw
func.func @atomic_rmw(%I : memref<10xi32>, %ival : i32, %F : memref<10xf32>, %fval : f32, %i : index) {
memref.atomic_rmw assign %fval, %F[%i] : (f32, memref<10xf32>) -> f32
- // CHECK: llvm.atomicrmw xchg %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw xchg %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw addi %ival, %I[%i] : (i32, memref<10xi32>) -> i32
- // CHECK: llvm.atomicrmw add %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw add %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw maxs %ival, %I[%i] : (i32, memref<10xi32>) -> i32
- // CHECK: llvm.atomicrmw max %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw max %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw mins %ival, %I[%i] : (i32, memref<10xi32>) -> i32
- // CHECK: llvm.atomicrmw min %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw min %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw maxu %ival, %I[%i] : (i32, memref<10xi32>) -> i32
- // CHECK: llvm.atomicrmw umax %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw umax %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw minu %ival, %I[%i] : (i32, memref<10xi32>) -> i32
- // CHECK: llvm.atomicrmw umin %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw umin %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw addf %fval, %F[%i] : (f32, memref<10xf32>) -> f32
- // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw fadd %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw ori %ival, %I[%i] : (i32, memref<10xi32>) -> i32
- // CHECK: llvm.atomicrmw _or %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw _or %{{.*}}, %{{.*}} acq_rel
memref.atomic_rmw andi %ival, %I[%i] : (i32, memref<10xi32>) -> i32
- // CHECK: llvm.atomicrmw _and %{{.*}}, %{{.*}} acq_rel
+ // CHECK: ptr.atomicrmw _and %{{.*}}, %{{.*}} acq_rel
return
}
@@ -414,7 +414,7 @@ func.func @atomic_rmw_with_offset(%I : memref<10xi32, strided<[1], offset: 5>>,
// CHECK: %[[OFFSET:.+]] = llvm.mlir.constant(5 : index) : i64
// CHECK: %[[OFFSET_PTR:.+]] = llvm.getelementptr %[[BASE_PTR]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32
// CHECK: %[[PTR:.+]] = llvm.getelementptr %[[OFFSET_PTR]][%[[INDEX]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32
-// CHECK: llvm.atomicrmw _and %[[PTR]], %[[ARG1]] acq_rel
+// CHECK: ptr.atomicrmw _and %[[PTR]], %[[ARG1]] acq_rel
// -----
@@ -426,7 +426,7 @@ func.func @generic_atomic_rmw(%I : memref<10xi32>, %i : index) {
}
llvm.return
}
-// CHECK: %[[INIT:.*]] = llvm.load %{{.*}} : !llvm.ptr -> i32
+// CHECK: %[[INIT:.*]] = ptr.load %{{.*}} : !llvm.ptr -> i32
// CHECK-NEXT: llvm.br ^bb1(%[[INIT]] : i32)
// CHECK-NEXT: ^bb1(%[[LOADED:.*]]: i32):
// CHECK-NEXT: %[[PAIR:.*]] = llvm.cmpxchg %{{.*}}, %[[LOADED]], %[[LOADED]]
@@ -452,7 +452,7 @@ func.func @generic_atomic_rmw_in_alloca_scope(){
// CHECK: %[[STACK_SAVE:.*]] = llvm.intr.stacksave : !llvm.ptr
// CHECK-NEXT: llvm.br ^bb1
// CHECK: ^bb1:
-// CHECK: %[[INIT:.*]] = llvm.load %[[BUF:.*]] : !llvm.ptr -> i32
+// CHECK: %[[INIT:.*]] = ptr.load %[[BUF:.*]] : !llvm.ptr -> i32
// CHECK-NEXT: llvm.br ^bb2(%[[INIT]] : i32)
// CHECK-NEXT: ^bb2(%[[LOADED:.*]]: i32):
// CHECK-NEXT: %[[PAIR:.*]] = llvm.cmpxchg %[[BUF]], %[[LOADED]], %[[LOADED]]
@@ -484,7 +484,7 @@ func.func @memref_copy_ranked() {
// CHECK: [[MUL:%.*]] = llvm.mul [[ONE]], [[EXTRACT0]] : i64
// CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr, f32
- // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64
+ // CHECK: [[PTRTOINT:%.*]] = ptr.ptrtoint [[GEP]] : !llvm.ptr to i64
// CHECK: [[SIZE:%.*]] = llvm.mul [[MUL]], [[PTRTOINT]] : i64
// CHECK: [[EXTRACT1P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
// CHECK: [[EXTRACT1O:%.*]] = llvm.extractvalue {{%.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
@@ -515,7 +515,7 @@ func.func @memref_copy_contiguous(%in: memref<16x4xi32>, %offset: index) {
// CHECK: [[MUL2:%.*]] = llvm.mul [[MUL1]], [[EXTRACT1]] : i64
// CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr, i32
- // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64
+ // CHECK: [[PTRTOINT:%.*]] = ptr.ptrtoint [[GEP]] : !llvm.ptr to i64
// CHECK: [[SIZE:%.*]] = llvm.mul [[MUL2]], [[PTRTOINT]] : i64
// CHECK: [[EXTRACT1P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
// CHECK: [[EXTRACT1O:%.*]] = llvm.extractvalue {{%.*}}[2] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)>
@@ -565,7 +565,7 @@ func.func @memref_copy_unranked() {
memref.copy %1, %3 : memref<*xi1> to memref<*xi1>
// CHECK: [[ONE:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[ALLOCA:%.*]] = llvm.alloca [[ONE]] x !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> : (i64) -> !llvm.ptr
- // CHECK: llvm.store {{%.*}}, [[ALLOCA]] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>, !llvm.ptr
+ // CHECK: ptr.store {{%.*}}, [[ALLOCA]] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>, !llvm.ptr
// CHECK: [[RANK:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[UNDEF:%.*]] = llvm.mlir.undef : !llvm.struct<(i64, ptr)>
// CHECK: [[INSERT:%.*]] = llvm.insertvalue [[RANK]], [[UNDEF]][0] : !llvm.struct<(i64, ptr)>
@@ -573,11 +573,11 @@ func.func @memref_copy_unranked() {
// CHECK: [[STACKSAVE:%.*]] = llvm.intr.stacksave : !llvm.ptr
// CHECK: [[RANK2:%.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: [[ALLOCA2:%.*]] = llvm.alloca [[RANK2]] x !llvm.struct<(i64, ptr)> : (i64) -> !llvm.ptr
- // CHECK: llvm.store {{%.*}}, [[ALLOCA2]] : !llvm.struct<(i64, ptr)>, !llvm.ptr
+ // CHECK: ptr.store {{%.*}}, [[ALLOCA2]] : !llvm.struct<(i64, ptr)>, !llvm.ptr
// CHECK: [[ALLOCA3:%.*]] = llvm.alloca [[RANK2]] x !llvm.struct<(i64, ptr)> : (i64) -> !llvm.ptr
- // CHECK: llvm.store [[INSERT2]], [[ALLOCA3]] : !llvm.struct<(i64, ptr)>, !llvm.ptr
+ // CHECK: ptr.store [[INSERT2]], [[ALLOCA3]] : !llvm.struct<(i64, ptr)>, !llvm.ptr
// CHECK: [[SIZEPTR:%.*]] = llvm.getelementptr {{%.*}}[1] : (!llvm.ptr) -> !llvm.ptr, i1
- // CHECK: [[SIZE:%.*]] = llvm.ptrtoint [[SIZEPTR]] : !llvm.ptr to i64
+ // CHECK: [[SIZE:%.*]] = ptr.ptrtoint [[SIZEPTR]] : !llvm.ptr to i64
// CHECK: llvm.call @memrefCopy([[SIZE]], [[ALLOCA2]], [[ALLOCA3]]) : (i64, !llvm.ptr, !llvm.ptr) -> ()
// CHECK: llvm.intr.stackrestore [[STACKSAVE]]
return
@@ -589,7 +589,7 @@ func.func @memref_copy_unranked() {
func.func @extract_aligned_pointer_as_index(%m: memref<?xf32>) -> index {
%0 = memref.extract_aligned_pointer_as_index %m: memref<?xf32> -> index
// CHECK: %[[E:.*]] = llvm.extractvalue %{{.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)>
- // CHECK: %[[I64:.*]] = llvm.ptrtoint %[[E]] : !llvm.ptr to i64
+ // CHECK: %[[I64:.*]] = ptr.ptrtoint %[[E]] : !llvm.ptr to i64
// CHECK: %[[R:.*]] = builtin.unrealized_conversion_cast %[[I64]] : i64 to index
// CHECK: return %[[R:.*]] : index
@@ -630,7 +630,7 @@ func.func @extract_strided_metadata(
// CHECK-LABEL: func @load_non_temporal(
func.func @load_non_temporal(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>>) {
%1 = arith.constant 7 : index
- // CHECK: llvm.load %{{.*}} {nontemporal} : !llvm.ptr -> f32
+ // CHECK: ptr.load %{{.*}} {nontemporal} : !llvm.ptr -> f32
%2 = memref.load %arg0[%1] {nontemporal = true} : memref<32xf32, affine_map<(d0) -> (d0)>>
func.return
}
@@ -641,7 +641,7 @@ func.func @load_non_temporal(%arg0 : memref<32xf32, affine_map<(d0) -> (d0)>>) {
func.func @store_non_temporal(%input : memref<32xf32, affine_map<(d0) -> (d0)>>, %output : memref<32xf32, affine_map<(d0) -> (d0)>>) {
%1 = arith.constant 7 : index
%2 = memref.load %input[%1] {nontemporal = true} : memref<32xf32, affine_map<(d0) -> (d0)>>
- // CHECK: llvm.store %{{.*}}, %{{.*}} {nontemporal} : f32, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} {nontemporal} : f32, !llvm.ptr
memref.store %2, %output[%1] {nontemporal = true} : memref<32xf32, affine_map<(d0) -> (d0)>>
func.return
}
diff --git a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
index e11449e6f7c457..d3abcc9b8cf699 100644
--- a/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
+++ b/mlir/test/Conversion/NVGPUToNVVM/nvgpu-to-nvvm.mlir
@@ -237,7 +237,7 @@ func.func @async_cp(
// CHECK-DAG: %[[FI3:.*]] = llvm.mul %[[IDX1]], %[[S3]] : i64
// CHECK-DAG: %[[FI4:.*]] = llvm.add %[[FI3]], %[[IDX1]] : i64
// CHECK-DAG: %[[ADDRESSSRC:.*]] = llvm.getelementptr %[[BASESRC]][%[[FI4]]] : (!llvm.ptr, i64) -> !llvm.ptr
- // CHECK-DAG: %[[CAST2:.*]] = llvm.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
+ // CHECK-DAG: %[[CAST2:.*]] = ptr.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
// CHECK-DAG: nvvm.cp.async.shared.global %[[ADDRESSDST]], %[[CAST2]], 16, cache = ca
%0 = nvgpu.device_async_copy %src[%i, %i], %dst[%i, %i, %i], 4 : memref<128x128xf32> to memref<3x16x128xf32, 3>
// CHECK: nvvm.cp.async.commit.group
@@ -265,7 +265,7 @@ func.func @async_cp_i4(
// CHECK-DAG: %[[FI2:.*]] = llvm.mul %[[IDX1]], %[[S2]] : i64
// CHECK-DAG: %[[FI3:.*]] = llvm.add %[[FI2]], %[[IDX1]] : i64
// CHECK-DAG: %[[ADDRESSSRC:.*]] = llvm.getelementptr %[[BASESRC]][%[[FI3]]] : (!llvm.ptr, i64) -> !llvm.ptr
- // CHECK-DAG: %[[CAST2:.*]] = llvm.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
+ // CHECK-DAG: %[[CAST2:.*]] = ptr.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
// CHECK-DAG: nvvm.cp.async.shared.global %[[ADDRESSDST]], %[[CAST2]], 16, cache = ca
%0 = nvgpu.device_async_copy %src[%i, %i], %dst[%i, %i], 32 : memref<128x64xi4> to memref<128x128xi4, 3>
return %0 : !nvgpu.device.async.token
@@ -290,7 +290,7 @@ func.func @async_cp_zfill_f32_align4(
// CHECK-DAG: %[[FI2:.*]] = llvm.mul %[[IDX1]], %[[S2]] : i64
// CHECK-DAG: %[[FI3:.*]] = llvm.add %[[FI2]], %[[IDX1]] : i64
// CHECK-DAG: %[[ADDRESSSRC:.*]] = llvm.getelementptr %[[BASESRC]][%[[FI3]]] : (!llvm.ptr, i64) -> !llvm.ptr
- // CHECK-DAG: %[[CAST2:.*]] = llvm.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
+ // CHECK-DAG: %[[CAST2:.*]] = ptr.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
// CHECK-DAG: %[[c1:.*]] = llvm.mlir.constant(3 : i32) : i32
// CHECK-DAG: %[[c2:.*]] = llvm.mlir.constant(32 : i32) : i32
// CHECK-DAG: %[[c3:.*]] = llvm.trunc %[[SRC1]] : i64 to i32
@@ -325,7 +325,7 @@ func.func @async_cp_zfill_f32_align1(
// CHECK-DAG: %[[FI2:.*]] = llvm.mul %[[IDX1]], %[[S2]] : i64
// CHECK-DAG: %[[FI3:.*]] = llvm.add %[[FI2]], %[[IDX1]] : i64
// CHECK-DAG: %[[ADDRESSSRC:.*]] = llvm.getelementptr %[[BASESRC]][%[[FI3]]] : (!llvm.ptr, i64) -> !llvm.ptr
- // CHECK-DAG: %[[CAST2:.*]] = llvm.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
+ // CHECK-DAG: %[[CAST2:.*]] = ptr.addrspacecast %[[ADDRESSSRC]] : !llvm.ptr to !llvm.ptr<1>
// CHECK-DAG: %[[c1:.*]] = llvm.mlir.constant(3 : i32) : i32
// CHECK-DAG: %[[c2:.*]] = llvm.mlir.constant(32 : i32) : i32
// CHECK-DAG: %[[c3:.*]] = llvm.trunc %[[SRC1]] : i64 to i32
@@ -769,7 +769,7 @@ func.func @create_wgmma_descriptor(%tensorMap : !tensorMap) -> !nvgpu.warpgroup.
// CHECK: %[[c64:.+]] = llvm.mlir.constant(64 : i64) : i64
// CHECK: %[[c1024:.+]] = llvm.mlir.constant(1024 : i64) : i64
// CHECK: %[[S2:.+]] = llvm.extractvalue %[[S1]][1] : !llvm.struct<(ptr<3>, ptr<3>, i64, array<2 x i64>, array<2 x i64>)>
- // CHECK: %[[S3:.+]] = llvm.ptrtoint %[[S2]] : !llvm.ptr<3> to i64
+ // CHECK: %[[S3:.+]] = ptr.ptrtoint %[[S2]] : !llvm.ptr<3> to i64
// CHECK: %[[S4:.+]] = llvm.mlir.constant(46 : i64) : i64
// CHECK: %[[S5:.+]] = llvm.shl %[[S3]], %[[S4]] : i64
// CHECK: %[[S6:.+]] = llvm.mlir.constant(50 : i64) : i64
@@ -1135,7 +1135,7 @@ module attributes {transform.with_named_sequence} {
} with type_converter {
transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
{use_opaque_pointers = true}
- } {legal_dialects = ["arith", "func", "llvm", "memref", "nvvm", "vector", "scf"], partial_conversion} : !transform.any_op
+ } {legal_dialects = ["arith", "func", "llvm", "ptr", "memref", "nvvm", "vector", "scf"], partial_conversion} : !transform.any_op
transform.yield
}
}
diff --git a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
index 3fbeaebb592a4d..4f95eb9ac0e22b 100644
--- a/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
+++ b/mlir/test/Conversion/OpenMPToLLVM/convert-to-llvmir.mlir
@@ -223,7 +223,7 @@ llvm.func @_QPomp_target_data(%a : !llvm.ptr, %b : !llvm.ptr, %c : !llvm.ptr, %d
// CHECK: %[[MAP_0:.*]] = omp.map_info var_ptr(%[[ARG0]] : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
// CHECK: omp.target_data map_entries(%[[MAP_0]] : !llvm.ptr) {
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(10 : i32) : i32
-// CHECK: llvm.store %[[VAL_1]], %[[ARG1]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[VAL_1]], %[[ARG1]] : i32, !llvm.ptr
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return
@@ -232,7 +232,7 @@ llvm.func @_QPomp_target_data_region(%a : !llvm.ptr, %i : !llvm.ptr) {
%1 = omp.map_info var_ptr(%a : !llvm.ptr, !llvm.array<1024 x i32>) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
omp.target_data map_entries(%1 : !llvm.ptr) {
%2 = llvm.mlir.constant(10 : i32) : i32
- llvm.store %2, %i : i32, !llvm.ptr
+ ptr.store %2, %i : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -249,7 +249,7 @@ llvm.func @_QPomp_target_data_region(%a : !llvm.ptr, %i : !llvm.ptr) {
// CHECK: omp.target thread_limit(%[[VAL_0]] : i32) map_entries(%[[MAP1]] -> %[[BB_ARG0:.*]], %[[MAP2]] -> %[[BB_ARG1:.*]] : !llvm.ptr, !llvm.ptr) {
// CHECK: ^bb0(%[[BB_ARG0]]: !llvm.ptr, %[[BB_ARG1]]: !llvm.ptr):
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(10 : i32) : i32
-// CHECK: llvm.store %[[VAL_1]], %[[BB_ARG1]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[VAL_1]], %[[BB_ARG1]] : i32, !llvm.ptr
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return
@@ -262,7 +262,7 @@ llvm.func @_QPomp_target(%a : !llvm.ptr, %i : !llvm.ptr) {
omp.target thread_limit(%0 : i32) map_entries(%1 -> %arg0, %3 -> %arg1 : !llvm.ptr, !llvm.ptr) {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
%2 = llvm.mlir.constant(10 : i32) : i32
- llvm.store %2, %arg1 : i32, !llvm.ptr
+ ptr.store %2, %arg1 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -347,16 +347,16 @@ llvm.func @_QPsimple_reduction(%arg0: !llvm.ptr {fir.bindc_name = "y"}) {
%3 = llvm.mlir.constant(1 : i64) : i64
%4 = llvm.alloca %3 x i32 {bindc_name = "x", uniq_name = "_QFsimple_reductionEx"} : (i64) -> !llvm.ptr
%5 = llvm.zext %2 : i1 to i32
- llvm.store %5, %4 : i32, !llvm.ptr
+ ptr.store %5, %4 : i32, !llvm.ptr
omp.parallel {
%6 = llvm.alloca %3 x i32 {adapt.valuebyref, in_type = i32, operandSegmentSizes = array<i32: 0, 0>, pinned} : (i64) -> !llvm.ptr
omp.wsloop reduction(@eqv_reduction -> %4 : !llvm.ptr) for (%arg1) : i32 = (%1) to (%0) inclusive step (%1) {
- llvm.store %arg1, %6 : i32, !llvm.ptr
- %7 = llvm.load %6 : !llvm.ptr -> i32
+ ptr.store %arg1, %6 : i32, !llvm.ptr
+ %7 = ptr.load %6 : !llvm.ptr -> i32
%8 = llvm.sext %7 : i32 to i64
%9 = llvm.sub %8, %3 : i64
%10 = llvm.getelementptr %arg0[0, %9] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<100 x i32>
- %11 = llvm.load %10 : !llvm.ptr -> i32
+ %11 = ptr.load %10 : !llvm.ptr -> i32
omp.reduction %11, %4 : i32, !llvm.ptr
omp.yield
}
@@ -382,7 +382,7 @@ llvm.func @_QQmain() {
%8 = llvm.icmp "sgt" %7, %0 : i64
llvm.cond_br %8, ^bb2, ^bb3
^bb2: // pred: ^bb1
- llvm.store %6, %4 : i32, !llvm.ptr
+ ptr.store %6, %4 : i32, !llvm.ptr
// CHECK: omp.task
omp.task {
// CHECK: llvm.call @[[CALL_FUNC:.*]]({{.*}}) :
@@ -390,12 +390,12 @@ llvm.func @_QQmain() {
// CHECK: omp.terminator
omp.terminator
}
- %9 = llvm.load %4 : !llvm.ptr -> i32
+ %9 = ptr.load %4 : !llvm.ptr -> i32
%10 = llvm.add %9, %5 : i32
%11 = llvm.sub %7, %2 : i64
llvm.br ^bb1(%10, %11 : i32, i64)
^bb3: // pred: ^bb1
- llvm.store %6, %4 : i32, !llvm.ptr
+ ptr.store %6, %4 : i32, !llvm.ptr
// CHECK: omp.terminator
omp.terminator
}
@@ -422,15 +422,15 @@ llvm.func @sub_() {
%7 = llvm.icmp "sgt" %6, %0 : i64
llvm.cond_br %7, ^bb2, ^bb3
^bb2: // pred: ^bb1
- llvm.store %5, %3 : i32, !llvm.ptr
- %8 = llvm.load %3 : !llvm.ptr -> i32
+ ptr.store %5, %3 : i32, !llvm.ptr
+ %8 = ptr.load %3 : !llvm.ptr -> i32
// CHECK: llvm.add
%9 = arith.addi %8, %4 : i32
// CHECK: llvm.sub
%10 = arith.subi %6, %1 : i64
llvm.br ^bb1(%9, %10 : i32, i64)
^bb3: // pred: ^bb1
- llvm.store %5, %3 : i32, !llvm.ptr
+ ptr.store %5, %3 : i32, !llvm.ptr
// CHECK: omp.terminator
omp.terminator
}
diff --git a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
index faf5ec4aba7d4d..1915a8a5cb57b4 100644
--- a/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
+++ b/mlir/test/Conversion/SCFToOpenMP/reductions.mlir
@@ -13,8 +13,8 @@
// CHECK: atomic
// CHECK: ^{{.*}}(%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr):
-// CHECK: %[[RHS:.*]] = llvm.load %[[ARG1]] : !llvm.ptr -> f32
-// CHECK: llvm.atomicrmw fadd %[[ARG0]], %[[RHS]] monotonic
+// CHECK: %[[RHS:.*]] = ptr.load %[[ARG1]] : !llvm.ptr -> f32
+// CHECK: ptr.atomicrmw fadd %[[ARG0]], %[[RHS]] monotonic
// CHECK-LABEL: @reduction1
func.func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
@@ -22,7 +22,7 @@ func.func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: %[[CST:.*]] = arith.constant 0.0
// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1
// CHECK: %[[BUF:.*]] = llvm.alloca %[[ONE]] x f32
- // CHECK: llvm.store %[[CST]], %[[BUF]]
+ // CHECK: ptr.store %[[CST]], %[[BUF]]
%step = arith.constant 1 : index
%zero = arith.constant 0.0 : f32
// CHECK: omp.parallel
@@ -42,7 +42,7 @@ func.func @reduction1(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: omp.yield
}
// CHECK: omp.terminator
- // CHECK: llvm.load %[[BUF]]
+ // CHECK: ptr.load %[[BUF]]
return
}
@@ -181,8 +181,8 @@ func.func @reduction3(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: atomic
// CHECK: ^{{.*}}(%[[ARG0:.*]]: !llvm.ptr, %[[ARG1:.*]]: !llvm.ptr):
-// CHECK: %[[RHS:.*]] = llvm.load %[[ARG1]] : !llvm.ptr -> i64
-// CHECK: llvm.atomicrmw max %[[ARG0]], %[[RHS]] monotonic
+// CHECK: %[[RHS:.*]] = ptr.load %[[ARG1]] : !llvm.ptr -> i64
+// CHECK: ptr.atomicrmw max %[[ARG0]], %[[RHS]] monotonic
// CHECK-LABEL: @reduction4
func.func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
@@ -193,9 +193,9 @@ func.func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: %[[IONE:.*]] = arith.constant 1
%ione = arith.constant 1 : i64
// CHECK: %[[BUF1:.*]] = llvm.alloca %{{.*}} x f32
- // CHECK: llvm.store %[[ZERO]], %[[BUF1]]
+ // CHECK: ptr.store %[[ZERO]], %[[BUF1]]
// CHECK: %[[BUF2:.*]] = llvm.alloca %{{.*}} x i64
- // CHECK: llvm.store %[[IONE]], %[[BUF2]]
+ // CHECK: ptr.store %[[IONE]], %[[BUF2]]
// CHECK: omp.parallel
// CHECK: omp.wsloop
@@ -223,8 +223,8 @@ func.func @reduction4(%arg0 : index, %arg1 : index, %arg2 : index,
// CHECK: omp.yield
}
// CHECK: omp.terminator
- // CHECK: %[[RES1:.*]] = llvm.load %[[BUF1]] : !llvm.ptr -> f32
- // CHECK: %[[RES2:.*]] = llvm.load %[[BUF2]] : !llvm.ptr -> i64
+ // CHECK: %[[RES1:.*]] = ptr.load %[[BUF1]] : !llvm.ptr -> f32
+ // CHECK: %[[RES2:.*]] = ptr.load %[[BUF2]] : !llvm.ptr -> i64
// CHECK: return %[[RES1]], %[[RES2]]
return %res#0, %res#1 : f32, i64
}
diff --git a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
index 1847975b279afa..1f8a35dd8e2361 100644
--- a/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
+++ b/mlir/test/Conversion/SPIRVToLLVM/memory-ops-to-llvm.mlir
@@ -94,7 +94,7 @@ spirv.module Logical GLSL450 {
// CHECK-LABEL: @load
spirv.func @load() "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.load %{{.*}} : !llvm.ptr -> f32
+ // CHECK: ptr.load %{{.*}} : !llvm.ptr -> f32
%1 = spirv.Load "Function" %0 : f32
spirv.Return
}
@@ -102,7 +102,7 @@ spirv.func @load() "None" {
// CHECK-LABEL: @load_none
spirv.func @load_none() "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.load %{{.*}} : !llvm.ptr -> f32
+ // CHECK: ptr.load %{{.*}} : !llvm.ptr -> f32
%1 = spirv.Load "Function" %0 ["None"] : f32
spirv.Return
}
@@ -110,7 +110,7 @@ spirv.func @load_none() "None" {
// CHECK-LABEL: @load_with_alignment
spirv.func @load_with_alignment() "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr -> f32
+ // CHECK: ptr.load %{{.*}} {alignment = 4 : i64} : !llvm.ptr -> f32
%1 = spirv.Load "Function" %0 ["Aligned", 4] : f32
spirv.Return
}
@@ -118,7 +118,7 @@ spirv.func @load_with_alignment() "None" {
// CHECK-LABEL: @load_volatile
spirv.func @load_volatile() "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.load volatile %{{.*}} : !llvm.ptr -> f32
+ // CHECK: ptr.load volatile %{{.*}} : !llvm.ptr -> f32
%1 = spirv.Load "Function" %0 ["Volatile"] : f32
spirv.Return
}
@@ -126,7 +126,7 @@ spirv.func @load_volatile() "None" {
// CHECK-LABEL: @load_nontemporal
spirv.func @load_nontemporal() "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.load %{{.*}} {nontemporal} : !llvm.ptr -> f32
+ // CHECK: ptr.load %{{.*}} {nontemporal} : !llvm.ptr -> f32
%1 = spirv.Load "Function" %0 ["Nontemporal"] : f32
spirv.Return
}
@@ -138,7 +138,7 @@ spirv.func @load_nontemporal() "None" {
// CHECK-LABEL: @store
spirv.func @store(%arg0 : f32) "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.store %{{.*}}, %{{.*}} : f32, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} : f32, !llvm.ptr
spirv.Store "Function" %0, %arg0 : f32
spirv.Return
}
@@ -146,7 +146,7 @@ spirv.func @store(%arg0 : f32) "None" {
// CHECK-LABEL: @store_composite
spirv.func @store_composite(%arg0 : !spirv.struct<(f64)>) "None" {
%0 = spirv.Variable : !spirv.ptr<!spirv.struct<(f64)>, Function>
- // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.struct<packed (f64)>, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} : !llvm.struct<packed (f64)>, !llvm.ptr
spirv.Store "Function" %0, %arg0 : !spirv.struct<(f64)>
spirv.Return
}
@@ -154,7 +154,7 @@ spirv.func @store_composite(%arg0 : !spirv.struct<(f64)>) "None" {
// CHECK-LABEL: @store_with_alignment
spirv.func @store_with_alignment(%arg0 : f32) "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 4 : i64} : f32, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} {alignment = 4 : i64} : f32, !llvm.ptr
spirv.Store "Function" %0, %arg0 ["Aligned", 4] : f32
spirv.Return
}
@@ -162,7 +162,7 @@ spirv.func @store_with_alignment(%arg0 : f32) "None" {
// CHECK-LABEL: @store_volatile
spirv.func @store_volatile(%arg0 : f32) "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.store volatile %{{.*}}, %{{.*}} : f32, !llvm.ptr
+ // CHECK: ptr.store volatile %{{.*}}, %{{.*}} : f32, !llvm.ptr
spirv.Store "Function" %0, %arg0 ["Volatile"] : f32
spirv.Return
}
@@ -170,7 +170,7 @@ spirv.func @store_volatile(%arg0 : f32) "None" {
// CHECK-LABEL: @store_nontemporal
spirv.func @store_nontemporal(%arg0 : f32) "None" {
%0 = spirv.Variable : !spirv.ptr<f32, Function>
- // CHECK: llvm.store %{{.*}}, %{{.*}} {nontemporal} : f32, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} {nontemporal} : f32, !llvm.ptr
spirv.Store "Function" %0, %arg0 ["Nontemporal"] : f32
spirv.Return
}
@@ -195,7 +195,7 @@ spirv.func @variable_scalar_with_initialization() "None" {
// CHECK: %[[VALUE:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x i64 : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : i64, !llvm.ptr
+ // CHECK: ptr.store %[[VALUE]], %[[ALLOCATED]] : i64, !llvm.ptr
%c = spirv.Constant 0 : i64
%0 = spirv.Variable init(%c) : !spirv.ptr<i64, Function>
spirv.Return
@@ -214,7 +214,7 @@ spirv.func @variable_vector_with_initialization() "None" {
// CHECK: %[[VALUE:.*]] = llvm.mlir.constant(dense<false> : vector<3xi1>) : vector<3xi1>
// CHECK: %[[SIZE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[ALLOCATED:.*]] = llvm.alloca %[[SIZE]] x vector<3xi1> : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[VALUE]], %[[ALLOCATED]] : vector<3xi1>, !llvm.ptr
+ // CHECK: ptr.store %[[VALUE]], %[[ALLOCATED]] : vector<3xi1>, !llvm.ptr
%c = spirv.Constant dense<false> : vector<3xi1>
%0 = spirv.Variable init(%c) : !spirv.ptr<vector<3xi1>, Function>
spirv.Return
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir b/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir
index 811b10721bf284..ab292efbe666d4 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-scalable-memcpy.mlir
@@ -13,11 +13,11 @@ func.func @vector_scalable_memcopy(%src : memref<?xf32>, %dst : memref<?xf32>, %
// CHECK: [[DATAIDX:%[0-9]+]] = builtin.unrealized_conversion_cast [[LOOPIDX]] : index to i64
// CHECK: [[SRCMEM:%[0-9]+]] = llvm.extractvalue [[SRCMRS]][1] : !llvm.struct<(ptr
// CHECK-NEXT: [[SRCPTR:%[0-9]+]] = llvm.getelementptr [[SRCMEM]]{{.}}[[DATAIDX]]{{.}} : (!llvm.ptr, i64) -> !llvm.ptr, f32
- // CHECK-NEXT: [[LDVAL:%[0-9]+]] = llvm.load [[SRCPTR]]{{.*}}: !llvm.ptr -> vector<[4]xf32>
+ // CHECK-NEXT: [[LDVAL:%[0-9]+]] = ptr.load [[SRCPTR]]{{.*}}: !llvm.ptr -> vector<[4]xf32>
%0 = vector.load %src[%i0] : memref<?xf32>, vector<[4]xf32>
// CHECK: [[DSTMEM:%[0-9]+]] = llvm.extractvalue [[DSTMRS]][1] : !llvm.struct<(ptr
// CHECK-NEXT: [[DSTPTR:%[0-9]+]] = llvm.getelementptr [[DSTMEM]]{{.}}[[DATAIDX]]{{.}} : (!llvm.ptr, i64) -> !llvm.ptr, f32
- // CHECK-NEXT: llvm.store [[LDVAL]], [[DSTPTR]]{{.*}}: vector<[4]xf32>, !llvm.ptr
+ // CHECK-NEXT: ptr.store [[LDVAL]], [[DSTPTR]]{{.*}}: vector<[4]xf32>, !llvm.ptr
vector.store %0, %dst[%i0] : memref<?xf32>, vector<[4]xf32>
}
diff --git a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
index 7ea0197bdecb36..cd02690adaaa1d 100644
--- a/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
+++ b/mlir/test/Conversion/VectorToLLVM/vector-to-llvm.mlir
@@ -1785,7 +1785,7 @@ func.func @transfer_read_1d_inbounds(%A : memref<?xf32>, %base: index) -> vector
// CHECK-SAME: (!llvm.ptr, i64) -> !llvm.ptr, f32
//
// 2. Rewrite as a load.
-// CHECK: %[[loaded:.*]] = llvm.load %[[gep]] {alignment = 4 : i64} : !llvm.ptr -> vector<17xf32>
+// CHECK: %[[loaded:.*]] = ptr.load %[[gep]] {alignment = 4 : i64} : !llvm.ptr -> vector<17xf32>
// -----
@@ -2019,7 +2019,7 @@ func.func @vector_load_op(%memref : memref<200x100xf32>, %i : index, %j : index)
// CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]] : i64
// CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}} : i64
// CHECK: %[[gep:.*]] = llvm.getelementptr %{{.*}}[%[[add]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.load %[[gep]] {alignment = 4 : i64} : !llvm.ptr -> vector<8xf32>
+// CHECK: ptr.load %[[gep]] {alignment = 4 : i64} : !llvm.ptr -> vector<8xf32>
// -----
@@ -2028,7 +2028,7 @@ func.func @vector_load_op_index(%memref : memref<200x100xindex>, %i : index, %j
return %0 : vector<8xindex>
}
// CHECK-LABEL: func @vector_load_op_index
-// CHECK: %[[T0:.*]] = llvm.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xi64>
+// CHECK: %[[T0:.*]] = ptr.load %{{.*}} {alignment = 8 : i64} : !llvm.ptr -> vector<8xi64>
// CHECK: %[[T1:.*]] = builtin.unrealized_conversion_cast %[[T0]] : vector<8xi64> to vector<8xindex>
// CHECK: return %[[T1]] : vector<8xindex>
@@ -2045,7 +2045,7 @@ func.func @vector_store_op(%memref : memref<200x100xf32>, %i : index, %j : index
// CHECK: %[[mul:.*]] = llvm.mul %{{.*}}, %[[c100]] : i64
// CHECK: %[[add:.*]] = llvm.add %[[mul]], %{{.*}} : i64
// CHECK: %[[gep:.*]] = llvm.getelementptr %{{.*}}[%[[add]]] : (!llvm.ptr, i64) -> !llvm.ptr, f32
-// CHECK: llvm.store %{{.*}}, %[[gep]] {alignment = 4 : i64} : vector<4xf32>, !llvm.ptr
+// CHECK: ptr.store %{{.*}}, %[[gep]] {alignment = 4 : i64} : vector<4xf32>, !llvm.ptr
// -----
@@ -2055,7 +2055,7 @@ func.func @vector_store_op_index(%memref : memref<200x100xindex>, %i : index, %j
return
}
// CHECK-LABEL: func @vector_store_op_index
-// CHECK: llvm.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<4xi64>, !llvm.ptr
+// CHECK: ptr.store %{{.*}}, %{{.*}} {alignment = 8 : i64} : vector<4xi64>, !llvm.ptr
// -----
diff --git a/mlir/test/Dialect/LLVMIR/canonicalize.mlir b/mlir/test/Dialect/LLVMIR/canonicalize.mlir
index 5e26fa37b681d7..fd55a483546efc 100644
--- a/mlir/test/Dialect/LLVMIR/canonicalize.mlir
+++ b/mlir/test/Dialect/LLVMIR/canonicalize.mlir
@@ -105,7 +105,7 @@ llvm.func @fold_bitcast2(%x : i32) -> i32 {
// CHECK-SAME: %[[a0:arg[0-9]+]]
// CHECK-NEXT: llvm.return %[[a0]]
llvm.func @fold_addrcast(%x : !llvm.ptr) -> !llvm.ptr {
- %c = llvm.addrspacecast %x : !llvm.ptr to !llvm.ptr
+ %c = ptr.addrspacecast %x : !llvm.ptr to !llvm.ptr
llvm.return %c : !llvm.ptr
}
@@ -113,8 +113,8 @@ llvm.func @fold_addrcast(%x : !llvm.ptr) -> !llvm.ptr {
// CHECK-SAME: %[[a0:arg[0-9]+]]
// CHECK-NEXT: llvm.return %[[a0]]
llvm.func @fold_addrcast2(%x : !llvm.ptr) -> !llvm.ptr {
- %c = llvm.addrspacecast %x : !llvm.ptr to !llvm.ptr<5>
- %d = llvm.addrspacecast %c : !llvm.ptr<5> to !llvm.ptr
+ %c = ptr.addrspacecast %x : !llvm.ptr to !llvm.ptr<5>
+ %d = ptr.addrspacecast %c : !llvm.ptr<5> to !llvm.ptr
llvm.return %d : !llvm.ptr
}
@@ -171,7 +171,7 @@ llvm.func @llvm_constant() -> i32 {
// CHECK-LABEL: load_dce
// CHECK-NEXT: llvm.return
llvm.func @load_dce(%x : !llvm.ptr) {
- %0 = llvm.load %x : !llvm.ptr -> i8
+ %0 = ptr.load %x : !llvm.ptr -> i8
llvm.return
}
@@ -198,13 +198,13 @@ llvm.func @alloca_dce() {
llvm.func @volatile_load(%x : !llvm.ptr) {
// A volatile load may have side-effects such as a write operation to arbitrary memory.
// Make sure it is not removed.
- // CHECK: llvm.load volatile
- %0 = llvm.load volatile %x : !llvm.ptr -> i8
+ // CHECK: ptr.load volatile
+ %0 = ptr.load volatile %x : !llvm.ptr -> i8
// Same with monotonic atomics and any stricter modes.
- // CHECK: llvm.load %{{.*}} atomic monotonic
- %2 = llvm.load %x atomic monotonic { alignment = 1 } : !llvm.ptr -> i8
+ // CHECK: ptr.load %{{.*}} atomic monotonic
+ %2 = ptr.load %x atomic monotonic { alignment = 1 } : !llvm.ptr -> i8
// But not unordered!
- // CHECK-NOT: llvm.load %{{.*}} atomic unordered
- %3 = llvm.load %x atomic unordered { alignment = 1 } : !llvm.ptr -> i8
+ // CHECK-NOT: ptr.load %{{.*}} atomic unordered
+ %3 = ptr.load %x atomic unordered { alignment = 1 } : !llvm.ptr -> i8
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/inlining-alias-scopes.mlir b/mlir/test/Dialect/LLVMIR/inlining-alias-scopes.mlir
index 29450833bee598..44f74e8eae531d 100644
--- a/mlir/test/Dialect/LLVMIR/inlining-alias-scopes.mlir
+++ b/mlir/test/Dialect/LLVMIR/inlining-alias-scopes.mlir
@@ -14,27 +14,27 @@
// CHECK-LABEL: llvm.func @foo
// CHECK: llvm.intr.experimental.noalias.scope.decl #[[$FOO_LOAD]]
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$FOO_LOAD]]]
// CHECK-SAME: noalias_scopes = [#[[$FOO_STORE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$FOO_STORE]]]
// CHECK-SAME: noalias_scopes = [#[[$FOO_LOAD]]]
llvm.func @foo(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
%0 = llvm.mlir.constant(5 : i64) : i64
llvm.intr.experimental.noalias.scope.decl #alias_scope
- %2 = llvm.load %arg1 {alias_scopes = [#alias_scope], alignment = 4 : i64, noalias_scopes = [#alias_scope1]} : !llvm.ptr -> f32
+ %2 = ptr.load %arg1 {alias_scopes = [#alias_scope], alignment = 4 : i64, noalias_scopes = [#alias_scope1]} : !llvm.ptr -> f32
%3 = llvm.getelementptr inbounds %arg0[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %2, %3 {alias_scopes = [#alias_scope1], alignment = 4 : i64, noalias_scopes = [#alias_scope]} : f32, !llvm.ptr
+ ptr.store %2, %3 {alias_scopes = [#alias_scope1], alignment = 4 : i64, noalias_scopes = [#alias_scope]} : f32, !llvm.ptr
llvm.return
}
// CHECK-LABEL: llvm.func @bar
// CHECK: llvm.intr.experimental.noalias.scope.decl #[[$BAR_LOAD]]
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$BAR_LOAD]]]
// CHECK-SAME: noalias_scopes = [#[[$BAR_STORE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$BAR_STORE]]]
// CHECK-SAME: noalias_scopes = [#[[$BAR_LOAD]]]
llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
@@ -66,30 +66,30 @@ llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
// CHECK-DAG: #[[$WITH_DOMAIN_ALIAS_SCOPE2:.*]] = #llvm.alias_scope<id = {{.*}}, domain = #[[WITH_DOMAIN_ALIAS]], description = {{.*}}>
// CHECK-LABEL: llvm.func @callee_with_metadata(
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_SCOPE1]], #[[$WITH_DOMAIN_SCOPE2]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$WITH_DOMAIN_SCOPE1]]]
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_SCOPE2]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$WITH_DOMAIN_SCOPE2]]]
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_SCOPE1]]]
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-NOT: {{(no)?}}alias_scopes =
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: {{(no)?}}alias_scopes =
llvm.func @callee_with_metadata(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
%0 = llvm.mlir.constant(5 : i64) : i64
%1 = llvm.mlir.constant(8 : i64) : i64
%2 = llvm.mlir.constant(7 : i64) : i64
- %3 = llvm.load %arg2 {alignment = 4 : i64, noalias_scopes = [#alias_scope, #alias_scope1]} : !llvm.ptr -> f32
+ %3 = ptr.load %arg2 {alignment = 4 : i64, noalias_scopes = [#alias_scope, #alias_scope1]} : !llvm.ptr -> f32
%4 = llvm.getelementptr inbounds %arg0[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 {alias_scopes = [#alias_scope], alignment = 4 : i64, noalias_scopes = [#alias_scope1]} : f32, !llvm.ptr
+ ptr.store %3, %4 {alias_scopes = [#alias_scope], alignment = 4 : i64, noalias_scopes = [#alias_scope1]} : f32, !llvm.ptr
%5 = llvm.getelementptr inbounds %arg1[%1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %5 {alias_scopes = [#alias_scope1], alignment = 4 : i64, noalias_scopes = [#alias_scope]} : f32, !llvm.ptr
- %6 = llvm.load %arg2 {alignment = 4 : i64} : !llvm.ptr -> f32
+ ptr.store %3, %5 {alias_scopes = [#alias_scope1], alignment = 4 : i64, noalias_scopes = [#alias_scope]} : f32, !llvm.ptr
+ %6 = ptr.load %arg2 {alignment = 4 : i64} : !llvm.ptr -> f32
%7 = llvm.getelementptr inbounds %arg0[%2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %6, %7 {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %6, %7 {alignment = 4 : i64} : f32, !llvm.ptr
llvm.return
}
@@ -100,95 +100,95 @@ llvm.func @callee_without_metadata(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !l
%0 = llvm.mlir.constant(5 : i64) : i64
%1 = llvm.mlir.constant(8 : i64) : i64
%2 = llvm.mlir.constant(7 : i64) : i64
- %3 = llvm.load %arg2 {alignment = 4 : i64} : !llvm.ptr -> f32
+ %3 = ptr.load %arg2 {alignment = 4 : i64} : !llvm.ptr -> f32
%4 = llvm.getelementptr inbounds %arg0[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %3, %4 {alignment = 4 : i64} : f32, !llvm.ptr
%5 = llvm.getelementptr inbounds %arg1[%1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %5 {alignment = 4 : i64} : f32, !llvm.ptr
- %6 = llvm.load %arg2 {alignment = 4 : i64} : !llvm.ptr -> f32
+ ptr.store %3, %5 {alignment = 4 : i64} : f32, !llvm.ptr
+ %6 = ptr.load %arg2 {alignment = 4 : i64} : !llvm.ptr -> f32
%7 = llvm.getelementptr inbounds %arg0[%2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %6, %7 {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %6, %7 {alignment = 4 : i64} : f32, !llvm.ptr
llvm.return
}
// CHECK-LABEL: llvm.func @caller(
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
// Inlining @callee_with_metadata with noalias_scopes.
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_NO_ALIAS_SCOPE1]], #[[$WITH_DOMAIN_NO_ALIAS_SCOPE2]], #[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$WITH_DOMAIN_NO_ALIAS_SCOPE1]]]
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_NO_ALIAS_SCOPE2]], #[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$WITH_DOMAIN_NO_ALIAS_SCOPE2]]]
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_NO_ALIAS_SCOPE1]], #[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// Inlining @callee_with_metadata with alias_scopes.
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_ALIAS_SCOPE1]], #[[$WITH_DOMAIN_ALIAS_SCOPE2]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$WITH_DOMAIN_ALIAS_SCOPE1]], #[[$CALL_DOMAIN_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_ALIAS_SCOPE2]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$WITH_DOMAIN_ALIAS_SCOPE2]], #[[$CALL_DOMAIN_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$WITH_DOMAIN_ALIAS_SCOPE1]]]
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
// Inlining @callee_without_metadata with noalias_scopes.
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// Inlining @callee_without_metadata with alias_scopes.
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$CALL_DOMAIN_SCOPE]]]
// CHECK-NOT: noalias_scopes
llvm.func @caller(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
- %0 = llvm.load %arg2 {alias_scopes = [#alias_scope2], alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
+ %0 = ptr.load %arg2 {alias_scopes = [#alias_scope2], alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
llvm.call @callee_with_metadata(%arg0, %arg1, %0) {noalias_scopes = [#alias_scope2]} : (!llvm.ptr, !llvm.ptr, !llvm.ptr) -> ()
llvm.call @callee_with_metadata(%arg1, %arg1, %arg0) {alias_scopes = [#alias_scope2]} : (!llvm.ptr, !llvm.ptr, !llvm.ptr) -> ()
llvm.call @callee_without_metadata(%arg0, %arg1, %0) {noalias_scopes = [#alias_scope2]} : (!llvm.ptr, !llvm.ptr, !llvm.ptr) -> ()
@@ -204,17 +204,17 @@ llvm.func @caller(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
llvm.func @foo(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noalias}) {
%0 = llvm.mlir.constant(5 : i64) : i64
- %1 = llvm.load %arg1 {alignment = 4 : i64} : !llvm.ptr -> f32
+ %1 = ptr.load %arg1 {alignment = 4 : i64} : !llvm.ptr -> f32
%2 = llvm.getelementptr inbounds %arg0[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %1, %2 {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %1, %2 {alignment = 4 : i64} : f32, !llvm.ptr
llvm.return
}
// CHECK-LABEL: llvm.func @bar
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$ARG1_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$ARG0_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$ARG0_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$ARG1_SCOPE]]]
llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
@@ -233,7 +233,7 @@ llvm.func @might_return_arg_derived(!llvm.ptr) -> !llvm.ptr
llvm.func @foo(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noalias}) {
%0 = llvm.mlir.constant(5 : i64) : i32
%1 = llvm.call @might_return_arg_derived(%arg0) : (!llvm.ptr) -> !llvm.ptr
- llvm.store %0, %1 : i32, !llvm.ptr
+ ptr.store %0, %1 : i32, !llvm.ptr
llvm.return
}
@@ -241,7 +241,7 @@ llvm.func @foo(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noalias})
// CHECK: llvm.call
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$ARG1_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: alias_scopes
// CHECK-NOT: noalias_scopes
llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
@@ -263,7 +263,7 @@ llvm.func @block_arg(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noa
llvm.cond_br %1, ^bb0(%arg0 : !llvm.ptr), ^bb0(%arg1 : !llvm.ptr)
^bb0(%arg2: !llvm.ptr):
- llvm.store %0, %arg2 : i32, !llvm.ptr
+ ptr.store %0, %arg2 : i32, !llvm.ptr
llvm.return
}
@@ -271,7 +271,7 @@ llvm.func @block_arg(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noa
// CHECK: llvm.call
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$ARG0_SCOPE]], #[[$ARG1_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK: alias_scopes = [#[[$ARG0_SCOPE]], #[[$ARG1_SCOPE]]]
llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
llvm.call @block_arg(%arg0, %arg2) : (!llvm.ptr, !llvm.ptr) -> ()
@@ -294,7 +294,7 @@ llvm.func @block_arg(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noa
llvm.cond_br %3, ^bb0(%arg0 : !llvm.ptr), ^bb0(%2 : !llvm.ptr)
^bb0(%arg2: !llvm.ptr):
- llvm.store %0, %arg2 : i32, !llvm.ptr
+ ptr.store %0, %arg2 : i32, !llvm.ptr
llvm.return
}
@@ -302,7 +302,7 @@ llvm.func @block_arg(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noa
// CHECK: llvm.call
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$ARG0_SCOPE]], #[[$ARG1_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$ARG1_SCOPE]]]
llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
@@ -326,7 +326,7 @@ llvm.func @unknown_object(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llv
llvm.cond_br %1, ^bb0(%arg0 : !llvm.ptr), ^bb0(%2 : !llvm.ptr)
^bb0(%arg2: !llvm.ptr):
- llvm.store %0, %arg2 : i32, !llvm.ptr
+ ptr.store %0, %arg2 : i32, !llvm.ptr
llvm.return
}
@@ -337,7 +337,7 @@ llvm.func @unknown_object(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llv
// CHECK: llvm.call
// CHECK-NOT: alias_scopes
// CHECK-SAME: noalias_scopes = [#[[$ARG0_SCOPE]], #[[$ARG1_SCOPE]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-NOT: alias_scopes
// CHECK-NOT: noalias_scopes
llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
@@ -353,23 +353,23 @@ llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
llvm.func @supported_operations(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.ptr {llvm.noalias}) {
%0 = llvm.mlir.constant(5 : i64) : i32
- llvm.store %0, %arg1 : i32, !llvm.ptr
- %1 = llvm.load %arg1 : !llvm.ptr -> i32
+ ptr.store %0, %arg1 : i32, !llvm.ptr
+ %1 = ptr.load %arg1 : !llvm.ptr -> i32
"llvm.intr.memcpy"(%arg0, %arg1, %1) <{ isVolatile = false }> : (!llvm.ptr, !llvm.ptr, i32) -> ()
"llvm.intr.memmove"(%arg0, %arg1, %1) <{ isVolatile = false }> : (!llvm.ptr, !llvm.ptr, i32) -> ()
"llvm.intr.memcpy.inline"(%arg0, %arg1) <{ isVolatile = false, len = 4 : i32}> : (!llvm.ptr, !llvm.ptr) -> ()
%2 = llvm.trunc %0 : i32 to i8
"llvm.intr.memset"(%arg0, %2, %1) <{ isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%3 = llvm.cmpxchg %arg0, %0, %1 seq_cst seq_cst : !llvm.ptr, i32
- %4 = llvm.atomicrmw add %arg0, %0 seq_cst : !llvm.ptr, i32
+ %4 = ptr.atomicrmw add %arg0, %0 seq_cst : !llvm.ptr, i32
llvm.return
}
// CHECK-LABEL: llvm.func @bar
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: alias_scopes = [#[[$ARG1_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$ARG0_SCOPE]]]
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: alias_scopes = [#[[$ARG1_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$ARG0_SCOPE]]]
// CHECK: "llvm.intr.memcpy"
@@ -387,7 +387,7 @@ llvm.func @supported_operations(%arg0: !llvm.ptr {llvm.noalias}, %arg1: !llvm.pt
// CHECK: llvm.cmpxchg
// CHECK-SAME: alias_scopes = [#[[$ARG0_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$ARG1_SCOPE]]]
-// CHECK: llvm.atomicrmw
+// CHECK: ptr.atomicrmw
// CHECK-SAME: alias_scopes = [#[[$ARG0_SCOPE]]]
// CHECK-SAME: noalias_scopes = [#[[$ARG1_SCOPE]]]
llvm.func @bar(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
diff --git a/mlir/test/Dialect/LLVMIR/inlining.mlir b/mlir/test/Dialect/LLVMIR/inlining.mlir
index 63e7a46f1bdb06..cdbdcc61fd11e2 100644
--- a/mlir/test/Dialect/LLVMIR/inlining.mlir
+++ b/mlir/test/Dialect/LLVMIR/inlining.mlir
@@ -8,8 +8,8 @@
func.func @inner_func_inlinable(%ptr : !llvm.ptr) -> i32 {
%0 = llvm.mlir.constant(42 : i32) : i32
%stack = llvm.intr.stacksave : !llvm.ptr
- llvm.store %0, %ptr { alignment = 8 } : i32, !llvm.ptr
- %1 = llvm.load %ptr { alignment = 8 } : !llvm.ptr -> i32
+ ptr.store %0, %ptr { alignment = 8 } : i32, !llvm.ptr
+ %1 = ptr.load %ptr { alignment = 8 } : !llvm.ptr -> i32
llvm.intr.dbg.value #variable = %0 : i32
llvm.intr.dbg.declare #variableAddr = %ptr : !llvm.ptr
llvm.intr.dbg.label #label
@@ -20,7 +20,7 @@ func.func @inner_func_inlinable(%ptr : !llvm.ptr) -> i32 {
"llvm.intr.memcpy"(%ptr, %ptr, %0) <{isVolatile = true}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
"llvm.intr.assume"(%true) : (i1) -> ()
llvm.fence release
- %2 = llvm.atomicrmw add %ptr, %0 monotonic : !llvm.ptr, i32
+ %2 = ptr.atomicrmw add %ptr, %0 monotonic : !llvm.ptr, i32
%3 = llvm.cmpxchg %ptr, %0, %1 acq_rel monotonic : !llvm.ptr, i32
llvm.inline_asm has_side_effects "foo", "bar" : () -> ()
llvm.cond_br %true, ^bb1, ^bb2
@@ -36,8 +36,8 @@ func.func @inner_func_inlinable(%ptr : !llvm.ptr) -> i32 {
// CHECK-SAME: %[[PTR:[a-zA-Z0-9_]+]]
// CHECK: %[[CST:.*]] = llvm.mlir.constant(42
// CHECK: %[[STACK:.+]] = llvm.intr.stacksave
-// CHECK: llvm.store %[[CST]], %[[PTR]]
-// CHECK: %[[RES:.+]] = llvm.load %[[PTR]]
+// CHECK: ptr.store %[[CST]], %[[PTR]]
+// CHECK: %[[RES:.+]] = ptr.load %[[PTR]]
// CHECK: llvm.intr.dbg.value #{{.+}} = %[[CST]]
// CHECK: llvm.intr.dbg.declare #{{.+}} = %[[PTR]]
// CHECK: llvm.intr.dbg.label #{{.+}}
@@ -46,7 +46,7 @@ func.func @inner_func_inlinable(%ptr : !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memcpy"(%[[PTR]], %[[PTR]]
// CHECK: "llvm.intr.assume"
// CHECK: llvm.fence release
-// CHECK: llvm.atomicrmw add %[[PTR]], %[[CST]] monotonic
+// CHECK: ptr.atomicrmw add %[[PTR]], %[[CST]] monotonic
// CHECK: llvm.cmpxchg %[[PTR]], %[[CST]], %[[RES]] acq_rel monotonic
// CHECK: llvm.inline_asm has_side_effects "foo", "bar"
// CHECK: llvm.unreachable
@@ -208,14 +208,14 @@ llvm.func @caller() {
llvm.func @static_alloca() -> f32 {
%0 = llvm.mlir.constant(4 : i32) : i32
%1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
- %2 = llvm.load %1 : !llvm.ptr -> f32
+ %2 = ptr.load %1 : !llvm.ptr -> f32
llvm.return %2 : f32
}
llvm.func @dynamic_alloca(%size : i32) -> f32 {
%0 = llvm.add %size, %size : i32
%1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
- %2 = llvm.load %1 : !llvm.ptr -> f32
+ %2 = ptr.load %1 : !llvm.ptr -> f32
llvm.return %2 : f32
}
@@ -267,7 +267,7 @@ llvm.func @static_alloca_not_in_entry(%cond : i1) -> f32 {
%3 = llvm.alloca %2 x f32 : (i32) -> !llvm.ptr
llvm.br ^bb3(%3: !llvm.ptr)
^bb3(%ptr : !llvm.ptr):
- %4 = llvm.load %ptr : !llvm.ptr -> f32
+ %4 = ptr.load %ptr : !llvm.ptr -> f32
llvm.return %4 : f32
}
@@ -288,7 +288,7 @@ llvm.func @static_alloca(%cond: i1) -> f32 {
%1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
llvm.cond_br %cond, ^bb1, ^bb2
^bb1:
- %2 = llvm.load %1 : !llvm.ptr -> f32
+ %2 = ptr.load %1 : !llvm.ptr -> f32
llvm.return %2 : f32
^bb2:
%3 = llvm.mlir.constant(3.14192 : f32) : f32
@@ -312,7 +312,7 @@ llvm.func @test_inline(%cond0 : i1, %cond1 : i1, %funcArg : f32) -> f32 {
// Make sure the lifetime end intrinsic has been inserted at both former
// return sites of the callee.
// CHECK: ^[[BB2]]:
- // CHECK-NEXT: llvm.load
+ // CHECK-NEXT: ptr.load
// CHECK-NEXT: llvm.intr.lifetime.end 4, %[[PTR]]
// CHECK: ^[[BB3]]:
// CHECK-NEXT: llvm.intr.lifetime.end 4, %[[PTR]]
@@ -327,7 +327,7 @@ llvm.func @test_inline(%cond0 : i1, %cond1 : i1, %funcArg : f32) -> f32 {
llvm.func @static_alloca() -> f32 {
%0 = llvm.mlir.constant(4 : i32) : i32
%1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
- %2 = llvm.load %1 : !llvm.ptr -> f32
+ %2 = ptr.load %1 : !llvm.ptr -> f32
llvm.return %2 : f32
}
@@ -341,7 +341,7 @@ llvm.func @test_inline(%cond0 : i1) {
"test.one_region_op"() ({
%0 = llvm.call @static_alloca() : () -> f32
// CHECK-NEXT: llvm.intr.lifetime.start 4, %[[ALLOCA]]
- // CHECK-NEXT: %[[RES:.+]] = llvm.load %[[ALLOCA]]
+ // CHECK-NEXT: %[[RES:.+]] = ptr.load %[[ALLOCA]]
// CHECK-NEXT: llvm.intr.lifetime.end 4, %[[ALLOCA]]
// CHECK-NEXT: test.region_yield %[[RES]]
test.region_yield %0 : f32
@@ -375,7 +375,7 @@ llvm.func @alloca_with_lifetime(%cond: i1) -> f32 {
%0 = llvm.mlir.constant(4 : i32) : i32
%1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
llvm.intr.lifetime.start 4, %1 : !llvm.ptr
- %2 = llvm.load %1 : !llvm.ptr -> f32
+ %2 = ptr.load %1 : !llvm.ptr -> f32
llvm.intr.lifetime.end 4, %1 : !llvm.ptr
%3 = llvm.fadd %2, %2 : f32
llvm.return %3 : f32
@@ -392,7 +392,7 @@ llvm.func @test_inline(%cond0 : i1, %cond1 : i1, %funcArg : f32) -> f32 {
// Make sure the original lifetime intrinsic has been preserved, rather than
// inserting a new one with a larger scope.
// CHECK: llvm.intr.lifetime.start 4, %[[PTR]]
- // CHECK-NEXT: llvm.load %[[PTR]]
+ // CHECK-NEXT: ptr.load %[[PTR]]
// CHECK-NEXT: llvm.intr.lifetime.end 4, %[[PTR]]
// CHECK: llvm.fadd
// CHECK-NOT: llvm.intr.lifetime.end
@@ -607,7 +607,7 @@ llvm.func @test_disallow_arg_attr(%ptr : !llvm.ptr) {
#caller = #llvm.access_group<id = distinct[1]<>>
llvm.func @inlinee(%ptr : !llvm.ptr) -> i32 {
- %0 = llvm.load %ptr { access_groups = [#callee] } : !llvm.ptr -> i32
+ %0 = ptr.load %ptr { access_groups = [#callee] } : !llvm.ptr -> i32
llvm.return %0 : i32
}
@@ -615,7 +615,7 @@ llvm.func @inlinee(%ptr : !llvm.ptr) -> i32 {
// CHECK-DAG: #[[$CALLER:.*]] = #llvm.access_group<id = {{.*}}>
// CHECK-LABEL: func @caller
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: access_groups = [#[[$CALLEE]], #[[$CALLER]]]
llvm.func @caller(%ptr : !llvm.ptr) -> i32 {
%0 = llvm.call @inlinee(%ptr) { access_groups = [#caller] } : (!llvm.ptr) -> (i32)
@@ -627,20 +627,20 @@ llvm.func @caller(%ptr : !llvm.ptr) -> i32 {
#caller = #llvm.access_group<id = distinct[1]<>>
llvm.func @inlinee(%ptr : !llvm.ptr) -> i32 {
- %0 = llvm.load %ptr : !llvm.ptr -> i32
+ %0 = ptr.load %ptr : !llvm.ptr -> i32
llvm.return %0 : i32
}
// CHECK-DAG: #[[$CALLER:.*]] = #llvm.access_group<id = {{.*}}>
// CHECK-LABEL: func @caller
-// CHECK: llvm.load
+// CHECK: ptr.load
// CHECK-SAME: access_groups = [#[[$CALLER]]]
-// CHECK: llvm.store
+// CHECK: ptr.store
// CHECK-SAME: access_groups = [#[[$CALLER]]]
llvm.func @caller(%ptr : !llvm.ptr) -> i32 {
%c5 = llvm.mlir.constant(5 : i32) : i32
%0 = llvm.call @inlinee(%ptr) { access_groups = [#caller] } : (!llvm.ptr) -> (i32)
- llvm.store %c5, %ptr { access_groups = [#caller] } : i32, !llvm.ptr
+ ptr.store %c5, %ptr { access_groups = [#caller] } : i32, !llvm.ptr
llvm.return %0 : i32
}
diff --git a/mlir/test/Dialect/LLVMIR/invalid.mlir b/mlir/test/Dialect/LLVMIR/invalid.mlir
index d72ff8ca3c3aa7..3d177ef1b1014f 100644
--- a/mlir/test/Dialect/LLVMIR/invalid.mlir
+++ b/mlir/test/Dialect/LLVMIR/invalid.mlir
@@ -126,78 +126,78 @@ func.func @gep_too_few_dynamic(%base : !llvm.ptr) {
// -----
func.func @load_non_llvm_type(%foo : memref<f32>) {
- // expected-error@+1 {{op operand #0 must be LLVM pointer type}}
- llvm.load %foo : memref<f32> -> f32
+ // expected-error@+1 {{'ptr.load' op operand #0 must be Pointer type, but got 'memref<f32>'}}
+ ptr.load %foo : memref<f32> -> f32
}
// -----
func.func @load_syncscope(%ptr : !llvm.ptr) {
// expected-error@below {{expected syncscope to be null for non-atomic access}}
- %1 = "llvm.load"(%ptr) {syncscope = "singlethread"} : (!llvm.ptr) -> (f32)
+ %1 = "ptr.load"(%ptr) {syncscope = "singlethread"} : (!llvm.ptr) -> (f32)
}
// -----
func.func @load_unsupported_ordering(%ptr : !llvm.ptr) {
// expected-error@below {{unsupported ordering 'release'}}
- %1 = llvm.load %ptr atomic release {alignment = 4 : i64} : !llvm.ptr -> f32
+ %1 = ptr.load %ptr atomic release {alignment = 4 : i64} : !llvm.ptr -> f32
}
// -----
func.func @load_unsupported_type(%ptr : !llvm.ptr) {
// expected-error@below {{unsupported type 'f80' for atomic access}}
- %1 = llvm.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> f80
+ %1 = ptr.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> f80
}
// -----
func.func @load_unsupported_type(%ptr : !llvm.ptr) {
// expected-error@below {{unsupported type 'i1' for atomic access}}
- %1 = llvm.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> i1
+ %1 = ptr.load %ptr atomic monotonic {alignment = 16 : i64} : !llvm.ptr -> i1
}
// -----
func.func @load_unaligned_atomic(%ptr : !llvm.ptr) {
// expected-error@below {{expected alignment for atomic access}}
- %1 = llvm.load %ptr atomic monotonic : !llvm.ptr -> f32
+ %1 = ptr.load %ptr atomic monotonic : !llvm.ptr -> f32
}
// -----
func.func @store_syncscope(%val : f32, %ptr : !llvm.ptr) {
// expected-error@below {{expected syncscope to be null for non-atomic access}}
- "llvm.store"(%val, %ptr) {syncscope = "singlethread"} : (f32, !llvm.ptr) -> ()
+ "ptr.store"(%val, %ptr) {syncscope = "singlethread"} : (f32, !llvm.ptr) -> ()
}
// -----
func.func @store_unsupported_ordering(%val : f32, %ptr : !llvm.ptr) {
// expected-error@below {{unsupported ordering 'acquire'}}
- llvm.store %val, %ptr atomic acquire {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %val, %ptr atomic acquire {alignment = 4 : i64} : f32, !llvm.ptr
}
// -----
func.func @store_unsupported_type(%val : f80, %ptr : !llvm.ptr) {
// expected-error@below {{unsupported type 'f80' for atomic access}}
- llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : f80, !llvm.ptr
+ ptr.store %val, %ptr atomic monotonic {alignment = 16 : i64} : f80, !llvm.ptr
}
// -----
func.func @store_unsupported_type(%val : i1, %ptr : !llvm.ptr) {
// expected-error@below {{unsupported type 'i1' for atomic access}}
- llvm.store %val, %ptr atomic monotonic {alignment = 16 : i64} : i1, !llvm.ptr
+ ptr.store %val, %ptr atomic monotonic {alignment = 16 : i64} : i1, !llvm.ptr
}
// -----
func.func @store_unaligned_atomic(%val : f32, %ptr : !llvm.ptr) {
// expected-error@below {{expected alignment for atomic access}}
- llvm.store %val, %ptr atomic monotonic : f32, !llvm.ptr
+ ptr.store %val, %ptr atomic monotonic : f32, !llvm.ptr
}
// -----
@@ -599,7 +599,7 @@ func.func @nvvm_invalid_mma_8(%a0 : i32, %a1 : i32,
func.func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr, %f32 : f32) {
// expected-error@+1 {{op failed to verify that result #0 and operand #1 have the same type}}
- %0 = "llvm.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr, f32) -> i32
+ %0 = "ptr.atomicrmw"(%f32_ptr, %f32) {bin_op=11, ordering=1} : (!llvm.ptr, f32) -> i32
llvm.return
}
@@ -607,7 +607,7 @@ func.func @atomicrmw_mismatched_operands(%f32_ptr : !llvm.ptr, %f32 : f32) {
func.func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr, %i32 : i32) {
// expected-error@+1 {{expected LLVM IR floating point type}}
- %0 = llvm.atomicrmw fadd %i32_ptr, %i32 unordered : !llvm.ptr, i32
+ %0 = ptr.atomicrmw fadd %i32_ptr, %i32 unordered : !llvm.ptr, i32
llvm.return
}
@@ -615,7 +615,7 @@ func.func @atomicrmw_expected_float(%i32_ptr : !llvm.ptr, %i32 : i32) {
func.func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr, %i1 : i1) {
// expected-error@+1 {{unexpected LLVM IR type for 'xchg' bin_op}}
- %0 = llvm.atomicrmw xchg %i1_ptr, %i1 unordered : !llvm.ptr, i1
+ %0 = ptr.atomicrmw xchg %i1_ptr, %i1 unordered : !llvm.ptr, i1
llvm.return
}
@@ -623,7 +623,7 @@ func.func @atomicrmw_unexpected_xchg_type(%i1_ptr : !llvm.ptr, %i1 : i1) {
func.func @atomicrmw_expected_int(%f32_ptr : !llvm.ptr, %f32 : f32) {
// expected-error@+1 {{expected LLVM IR integer type}}
- %0 = llvm.atomicrmw max %f32_ptr, %f32 unordered : !llvm.ptr, f32
+ %0 = ptr.atomicrmw max %f32_ptr, %f32 unordered : !llvm.ptr, f32
llvm.return
}
@@ -863,7 +863,7 @@ llvm.mlir.global appending @non_array_type_global_appending_linkage() : i32
module {
llvm.func @accessGroups(%arg0 : !llvm.ptr) {
// expected-error@below {{attribute 'access_groups' failed to satisfy constraint: LLVM dialect access group metadata array}}
- %0 = llvm.load %arg0 { "access_groups" = [@func1] } : !llvm.ptr -> i32
+ %0 = ptr.load %arg0 { "access_groups" = [@func1] } : !llvm.ptr -> i32
llvm.return
}
llvm.func @func1() {
@@ -899,7 +899,7 @@ module {
module {
llvm.func @noAliasScopes(%arg0 : !llvm.ptr) {
// expected-error@below {{attribute 'noalias_scopes' failed to satisfy constraint: LLVM dialect alias scope array}}
- %0 = llvm.load %arg0 { "noalias_scopes" = "test" } : !llvm.ptr -> i32
+ %0 = ptr.load %arg0 { "noalias_scopes" = "test" } : !llvm.ptr -> i32
llvm.return
}
}
@@ -1172,7 +1172,7 @@ llvm.mlir.global internal @side_effecting_global() : !llvm.struct<(i8)> {
%0 = llvm.mlir.constant(1 : i64) : i64
// expected-error@below {{ops with side effects not allowed in global initializers}}
%1 = llvm.alloca %0 x !llvm.struct<(i8)> : (i64) -> !llvm.ptr
- %2 = llvm.load %1 : !llvm.ptr -> !llvm.struct<(i8)>
+ %2 = ptr.load %1 : !llvm.ptr -> !llvm.struct<(i8)>
llvm.return %2 : !llvm.struct<(i8)>
}
@@ -1250,14 +1250,14 @@ func.func @invalid_bitcast_ptr_to_vec(%arg : !llvm.ptr) {
// -----
func.func @invalid_bitcast_addr_cast(%arg : !llvm.ptr<1>) {
- // expected-error@+1 {{cannot cast pointers of different address spaces, use 'llvm.addrspacecast' instead}}
+ // expected-error@+1 {{cannot cast pointers of different address spaces, use 'ptr.addrspacecast' instead}}
%0 = llvm.bitcast %arg : !llvm.ptr<1> to !llvm.ptr
}
// -----
func.func @invalid_bitcast_addr_cast_vec(%arg : !llvm.vec<4 x ptr<1>>) {
- // expected-error@+1 {{cannot cast pointers of different address spaces, use 'llvm.addrspacecast' instead}}
+ // expected-error@+1 {{cannot cast pointers of different address spaces, use 'ptr.addrspacecast' instead}}
%0 = llvm.bitcast %arg : !llvm.vec<4 x ptr<1>> to !llvm.vec<4 x ptr>
}
@@ -1272,15 +1272,16 @@ func.func @invalid_target_ext_alloca() {
// -----
func.func @invalid_target_ext_load(%arg0 : !llvm.ptr) {
- // expected-error@+1 {{result #0 must be LLVM type with size, but got '!llvm.target<"no_load">'}}
- %0 = llvm.load %arg0 {alignment = 8 : i64} : !llvm.ptr -> !llvm.target<"no_load">
+ // expected-error@+1 {{type is not loadable}}
+ %0 = ptr.load %arg0 {alignment = 8 : i64} : !llvm.ptr -> !llvm.target<"no_load">
+ llvm.return
}
// -----
func.func @invalid_target_ext_atomic(%arg0 : !llvm.ptr) {
// expected-error@+1 {{unsupported type '!llvm.target<"spirv.Event">' for atomic access}}
- %0 = llvm.load %arg0 atomic monotonic {alignment = 8 : i64} : !llvm.ptr -> !llvm.target<"spirv.Event">
+ %0 = ptr.load %arg0 atomic monotonic {alignment = 8 : i64} : !llvm.ptr -> !llvm.target<"spirv.Event">
}
// -----
diff --git a/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir b/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir
index bb96256f3af28f..1eacaf92dda8cc 100644
--- a/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir
+++ b/mlir/test/Dialect/LLVMIR/mem2reg-dbginfo.mlir
@@ -16,15 +16,15 @@ llvm.func @basic_store_load(%arg0: i64) -> i64 {
%0 = llvm.mlir.constant(1 : i32) : i32
// CHECK-NOT: = llvm.alloca
%1 = llvm.alloca %0 x i64 {alignment = 8 : i64} : (i32) -> !llvm.ptr
- // CHECK-NOT: llvm.store
- llvm.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store
+ ptr.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
// CHECK-NOT: llvm.intr.dbg.declare
llvm.intr.dbg.declare #di_local_variable = %1 : !llvm.ptr
// CHECK: llvm.intr.dbg.value #[[$VAR]] = %[[LOADED:.*]] : i64
// CHECK-NOT: llvm.intr.dbg.value
// CHECK-NOT: llvm.intr.dbg.declare
- // CHECK-NOT: llvm.store
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
+ // CHECK-NOT: ptr.store
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
// CHECK: llvm.return %[[LOADED]] : i64
llvm.return %2 : i64
}
@@ -42,12 +42,12 @@ llvm.func @block_argument_value(%arg0: i64, %arg1: i1) -> i64 {
^bb1:
// CHECK: llvm.intr.dbg.value #[[$VAR]] = %[[ARG0]]
// CHECK-NOT: llvm.intr.dbg.value
- llvm.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ ptr.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
llvm.br ^bb2
// CHECK: ^{{.*}}(%[[BLOCKARG:.*]]: i64):
^bb2:
// CHECK: llvm.intr.dbg.value #[[$VAR]] = %[[BLOCKARG]]
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
llvm.return %2 : i64
}
@@ -63,10 +63,10 @@ llvm.func @double_block_argument_value(%arg0: i64, %arg1: i1) -> i64 {
// CHECK: ^{{.*}}(%[[BLOCKARG1:.*]]: i64):
^bb1:
// CHECK: llvm.intr.dbg.value #[[$VAR]] = %[[BLOCKARG1]]
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
llvm.call @use(%2) : (i64) -> ()
// CHECK: llvm.intr.dbg.value #[[$VAR]] = %[[ARG0]]
- llvm.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ ptr.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
llvm.br ^bb2
// CHECK-NOT: llvm.intr.dbg.value
// CHECK: ^{{.*}}(%[[BLOCKARG2:.*]]: i64):
diff --git a/mlir/test/Dialect/LLVMIR/mem2reg-intrinsics.mlir b/mlir/test/Dialect/LLVMIR/mem2reg-intrinsics.mlir
index ce6338fb348837..4f03d804dbbc4b 100644
--- a/mlir/test/Dialect/LLVMIR/mem2reg-intrinsics.mlir
+++ b/mlir/test/Dialect/LLVMIR/mem2reg-intrinsics.mlir
@@ -16,7 +16,7 @@ llvm.func @basic_memset(%memset_value: i8) -> i32 {
// CHECK: %[[SHIFTED_16:.*]] = llvm.shl %[[VALUE_16]], %[[C16]]
// CHECK: %[[VALUE_32:.*]] = llvm.or %[[VALUE_16]], %[[SHIFTED_16]]
// CHECK-NOT: "llvm.intr.memset"
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: llvm.return %[[VALUE_32]] : i32
llvm.return %2 : i32
}
@@ -30,7 +30,7 @@ llvm.func @basic_memset_constant() -> i32 {
%memset_value = llvm.mlir.constant(42 : i8) : i8
%memset_len = llvm.mlir.constant(4 : i32) : i32
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: %[[RES:.*]] = llvm.mlir.constant(707406378 : i32) : i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %2 : i32
@@ -57,7 +57,7 @@ llvm.func @exotic_target_memset(%memset_value: i8) -> i40 {
// CHECK: %[[SHIFTED_COMPL:.*]] = llvm.shl %[[VALUE_32]], %[[C32]]
// CHECK: %[[VALUE_COMPL:.*]] = llvm.or %[[VALUE_32]], %[[SHIFTED_COMPL]]
// CHECK-NOT: "llvm.intr.memset"
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i40
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i40
// CHECK: llvm.return %[[VALUE_COMPL]] : i40
llvm.return %2 : i40
}
@@ -71,7 +71,7 @@ llvm.func @exotic_target_memset_constant() -> i40 {
%memset_value = llvm.mlir.constant(42 : i8) : i8
%memset_len = llvm.mlir.constant(5 : i32) : i32
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i40
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i40
// CHECK: %[[RES:.*]] = llvm.mlir.constant(181096032810 : i40) : i40
// CHECK: llvm.return %[[RES]] : i40
llvm.return %2 : i40
@@ -91,7 +91,7 @@ llvm.func @no_volatile_memset() -> i32 {
%memset_len = llvm.mlir.constant(4 : i32) : i32
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = true}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = true}> : (!llvm.ptr, i8, i32) -> ()
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.return %2 : i32
}
@@ -109,7 +109,7 @@ llvm.func @no_partial_memset() -> i32 {
%memset_len = llvm.mlir.constant(2 : i32) : i32
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.return %2 : i32
}
@@ -127,7 +127,7 @@ llvm.func @no_overflowing_memset() -> i32 {
%memset_len = llvm.mlir.constant(6 : i32) : i32
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.return %2 : i32
}
@@ -145,7 +145,7 @@ llvm.func @only_byte_aligned_integers_memset() -> i10 {
%memset_len = llvm.mlir.constant(2 : i32) : i32
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i10
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i10
llvm.return %2 : i10
}
@@ -160,9 +160,9 @@ llvm.func @basic_memcpy(%source: !llvm.ptr) -> i32 {
%memcpy_len = llvm.mlir.constant(4 : i32) : i32
"llvm.intr.memcpy"(%1, %source, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK-NOT: "llvm.intr.memcpy"
- // CHECK: %[[LOADED:.*]] = llvm.load %[[SOURCE]] : !llvm.ptr -> i32
+ // CHECK: %[[LOADED:.*]] = ptr.load %[[SOURCE]] : !llvm.ptr -> i32
// CHECK-NOT: "llvm.intr.memcpy"
- %2 = llvm.load %1 : !llvm.ptr -> i32
+ %2 = ptr.load %1 : !llvm.ptr -> i32
// CHECK: llvm.return %[[LOADED]] : i32
llvm.return %2 : i32
}
@@ -179,13 +179,13 @@ llvm.func @basic_memcpy_dest(%destination: !llvm.ptr) -> i32 {
%memcpy_len = llvm.mlir.constant(4 : i32) : i32
%1 = llvm.alloca %0 x i32 : (i32) -> !llvm.ptr
- llvm.store %data, %1 : i32, !llvm.ptr
+ ptr.store %data, %1 : i32, !llvm.ptr
"llvm.intr.memcpy"(%destination, %1, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK-NOT: "llvm.intr.memcpy"
- // CHECK: llvm.store %[[DATA]], %[[DESTINATION]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[DATA]], %[[DESTINATION]] : i32, !llvm.ptr
// CHECK-NOT: "llvm.intr.memcpy"
- %2 = llvm.load %1 : !llvm.ptr -> i32
+ %2 = ptr.load %1 : !llvm.ptr -> i32
// CHECK: llvm.return %[[DATA]] : i32
llvm.return %2 : i32
}
@@ -202,10 +202,10 @@ llvm.func @double_memcpy() -> i32 {
%1 = llvm.alloca %0 x i32 : (i32) -> !llvm.ptr
%2 = llvm.alloca %0 x i32 : (i32) -> !llvm.ptr
- llvm.store %data, %1 : i32, !llvm.ptr
+ ptr.store %data, %1 : i32, !llvm.ptr
"llvm.intr.memcpy"(%2, %1, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
- %res = llvm.load %2 : !llvm.ptr -> i32
+ %res = ptr.load %2 : !llvm.ptr -> i32
// CHECK-NEXT: llvm.return %[[DATA]] : i32
llvm.return %res : i32
}
@@ -224,7 +224,7 @@ llvm.func @ignore_self_memcpy() -> i32 {
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[ALLOCA]]
"llvm.intr.memcpy"(%1, %1, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
- %res = llvm.load %1 : !llvm.ptr -> i32
+ %res = ptr.load %1 : !llvm.ptr -> i32
llvm.return %res : i32
}
@@ -244,7 +244,7 @@ llvm.func @ignore_partial_memcpy(%source: !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[SOURCE]], %[[MEMCPY_LEN]]) <{isVolatile = false}>
"llvm.intr.memcpy"(%1, %source, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
- %res = llvm.load %1 : !llvm.ptr -> i32
+ %res = ptr.load %1 : !llvm.ptr -> i32
llvm.return %res : i32
}
@@ -264,7 +264,7 @@ llvm.func @ignore_volatile_memcpy(%source: !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[SOURCE]], %[[MEMCPY_LEN]]) <{isVolatile = true}>
"llvm.intr.memcpy"(%1, %source, %memcpy_len) <{isVolatile = true}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
- %res = llvm.load %1 : !llvm.ptr -> i32
+ %res = ptr.load %1 : !llvm.ptr -> i32
llvm.return %res : i32
}
@@ -279,9 +279,9 @@ llvm.func @basic_memmove(%source: !llvm.ptr) -> i32 {
%memmove_len = llvm.mlir.constant(4 : i32) : i32
"llvm.intr.memmove"(%1, %source, %memmove_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
// CHECK-NOT: "llvm.intr.memmove"
- // CHECK: %[[LOADED:.*]] = llvm.load %[[SOURCE]] : !llvm.ptr -> i32
+ // CHECK: %[[LOADED:.*]] = ptr.load %[[SOURCE]] : !llvm.ptr -> i32
// CHECK-NOT: "llvm.intr.memmove"
- %2 = llvm.load %1 : !llvm.ptr -> i32
+ %2 = ptr.load %1 : !llvm.ptr -> i32
// CHECK: llvm.return %[[LOADED]] : i32
llvm.return %2 : i32
}
@@ -296,9 +296,9 @@ llvm.func @basic_memcpy_inline(%source: !llvm.ptr) -> i32 {
%is_volatile = llvm.mlir.constant(false) : i1
"llvm.intr.memcpy.inline"(%1, %source) <{isVolatile = false, len = 4 : i32}> : (!llvm.ptr, !llvm.ptr) -> ()
// CHECK-NOT: "llvm.intr.memcpy.inline"
- // CHECK: %[[LOADED:.*]] = llvm.load %[[SOURCE]] : !llvm.ptr -> i32
+ // CHECK: %[[LOADED:.*]] = ptr.load %[[SOURCE]] : !llvm.ptr -> i32
// CHECK-NOT: "llvm.intr.memcpy.inline"
- %2 = llvm.load %1 : !llvm.ptr -> i32
+ %2 = ptr.load %1 : !llvm.ptr -> i32
// CHECK: llvm.return %[[LOADED]] : i32
llvm.return %2 : i32
}
diff --git a/mlir/test/Dialect/LLVMIR/mem2reg.mlir b/mlir/test/Dialect/LLVMIR/mem2reg.mlir
index 90e56c1166edfd..8c6806287e3f02 100644
--- a/mlir/test/Dialect/LLVMIR/mem2reg.mlir
+++ b/mlir/test/Dialect/LLVMIR/mem2reg.mlir
@@ -5,7 +5,7 @@ llvm.func @default_value() -> i32 {
// CHECK: %[[UNDEF:.*]] = llvm.mlir.undef : i32
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: llvm.return %[[UNDEF]] : i32
llvm.return %2 : i32
}
@@ -19,10 +19,10 @@ llvm.func @store_of_ptr() {
%2 = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[ALLOCA:.*]] = llvm.alloca
%3 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- // CHECK: llvm.store %{{.*}}, %[[ALLOCA]]
- llvm.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
- // CHECK: llvm.store %[[ALLOCA]], %{{.*}}
- llvm.store %3, %2 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %[[ALLOCA]]
+ ptr.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ // CHECK: ptr.store %[[ALLOCA]], %{{.*}}
+ ptr.store %3, %2 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
llvm.return
}
@@ -39,7 +39,7 @@ llvm.func @unreachable() {
// CHECK: ^{{.*}}:
// CHECK-NEXT: llvm.return
^bb1: // no predecessors
- llvm.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.return
}
@@ -52,14 +52,14 @@ llvm.func @unreachable_in_loop() -> i32 {
%1 = llvm.mlir.constant(6 : i32) : i32
%2 = llvm.mlir.constant(5 : i32) : i32
%3 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- llvm.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
// CHECK: llvm.br ^[[LOOP:.*]]
llvm.br ^bb1
// CHECK: ^[[LOOP]]:
^bb1: // 2 preds: ^bb0, ^bb3
// CHECK-NEXT: llvm.br ^[[ENDOFLOOP:.*]]
- llvm.store %2, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %2, %3 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb3
// CHECK: ^[[UNREACHABLE:.*]]:
@@ -84,12 +84,12 @@ llvm.func @branching(%arg0: i1, %arg1: i1) -> i32 {
// CHECK: llvm.cond_br %{{.*}}, ^[[BB2:.*]](%{{.*}} : i32), ^{{.*}}
llvm.cond_br %arg0, ^bb2, ^bb1
^bb1: // pred: ^bb0
- llvm.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
// CHECK: llvm.cond_br %{{.*}}, ^[[BB2]](%{{.*}} : i32), ^[[BB2]](%{{.*}} : i32)
llvm.cond_br %arg1, ^bb2, ^bb2
// CHECK: ^[[BB2]](%[[V3:.*]]: i32):
^bb2: // 3 preds: ^bb0, ^bb1, ^bb1
- %3 = llvm.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %3 = ptr.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: llvm.return %[[V3]] : i32
llvm.return %3 : i32
}
@@ -104,12 +104,12 @@ llvm.func @recursive_alloca() -> i32 {
%2 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
%3 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
%4 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i32) -> !llvm.ptr
- llvm.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
- llvm.store %3, %4 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
- %5 = llvm.load %4 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
- %6 = llvm.load %5 {alignment = 4 : i64} : !llvm.ptr -> i32
- llvm.store %6, %2 {alignment = 4 : i64} : i32, !llvm.ptr
- %7 = llvm.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %3, %4 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
+ %5 = ptr.load %4 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
+ %6 = ptr.load %5 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %6, %2 {alignment = 4 : i64} : i32, !llvm.ptr
+ %7 = ptr.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.return %7 : i32
}
@@ -123,15 +123,15 @@ llvm.func @reset_in_branch(%arg0: i32, %arg1: i1) {
%1 = llvm.mlir.constant(true) : i1
%2 = llvm.mlir.constant(false) : i1
%3 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- llvm.store %arg0, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %arg0, %3 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.cond_br %arg1, ^bb1, ^bb2
^bb1: // pred: ^bb0
- llvm.store %arg0, %3 {alignment = 4 : i64} : i32, !llvm.ptr
- %4 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %arg0, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ %4 = ptr.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.call @reset_in_branch(%4, %2) : (i32, i1) -> ()
llvm.br ^bb3
^bb2: // pred: ^bb0
- %5 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %5 = ptr.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.call @reset_in_branch(%5, %1) : (i32, i1) -> ()
llvm.br ^bb3
^bb3: // 2 preds: ^bb1, ^bb2
@@ -150,38 +150,38 @@ llvm.func @intertwined_alloca(%arg0: !llvm.ptr, %arg1: i32) {
%4 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
%5 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
%6 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- llvm.store %arg0, %2 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
- llvm.store %arg1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
- llvm.store %1, %4 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %arg0, %2 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
+ ptr.store %arg1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %1, %4 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb1
^bb1: // 2 preds: ^bb0, ^bb4
- %7 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %7 = ptr.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
%8 = llvm.add %7, %0 : i32
- %9 = llvm.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %9 = ptr.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
%10 = llvm.icmp "sgt" %8, %9 : i32
%11 = llvm.zext %10 : i1 to i32
llvm.cond_br %10, ^bb2, ^bb5
^bb2: // pred: ^bb1
- %12 = llvm.load %6 {alignment = 4 : i64} : !llvm.ptr -> i32
- llvm.store %12, %5 {alignment = 4 : i64} : i32, !llvm.ptr
- llvm.store %1, %6 {alignment = 4 : i64} : i32, !llvm.ptr
- %13 = llvm.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %12 = ptr.load %6 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %12, %5 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %1, %6 {alignment = 4 : i64} : i32, !llvm.ptr
+ %13 = ptr.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
%14 = llvm.icmp "sgt" %13, %1 : i32
%15 = llvm.zext %14 : i1 to i32
llvm.cond_br %14, ^bb3, ^bb4
^bb3: // pred: ^bb2
- %16 = llvm.load %2 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
- %17 = llvm.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %16 = ptr.load %2 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
+ %17 = ptr.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
%18 = llvm.sub %17, %0 : i32
%19 = llvm.getelementptr %16[%18] : (!llvm.ptr, i32) -> !llvm.ptr, i8
- %20 = llvm.load %5 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %20 = ptr.load %5 {alignment = 4 : i64} : !llvm.ptr -> i32
%21 = llvm.trunc %20 : i32 to i8
- llvm.store %21, %19 {alignment = 1 : i64} : i8, !llvm.ptr
+ ptr.store %21, %19 {alignment = 1 : i64} : i8, !llvm.ptr
llvm.br ^bb4
^bb4: // 2 preds: ^bb2, ^bb3
- %22 = llvm.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %22 = ptr.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
%23 = llvm.add %22, %0 : i32
- llvm.store %23, %4 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %23, %4 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb1
^bb5: // pred: ^bb1
llvm.return
@@ -200,7 +200,7 @@ llvm.func @complex_cf(%arg0: i32, ...) {
^bb1: // pred: ^bb0
llvm.br ^bb2
^bb2: // 2 preds: ^bb0, ^bb1
- llvm.store %2, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %2, %3 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb3
^bb3: // 2 preds: ^bb2, ^bb16
llvm.cond_br %1, ^bb4, ^bb17
@@ -233,7 +233,7 @@ llvm.func @complex_cf(%arg0: i32, ...) {
^bb17: // pred: ^bb3
llvm.br ^bb20
^bb18: // no predecessors
- %4 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %4 = ptr.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.br ^bb24
^bb19: // no predecessors
llvm.br ^bb20
@@ -268,30 +268,30 @@ llvm.func @llvm_crash() -> i32 {
%5 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
%6 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
%7 = llvm.bitcast %1 : i32 to i32
- // CHECK: llvm.store volatile %{{.*}}, %[[VOLATILE_ALLOCA]]
- llvm.store volatile %1, %5 {alignment = 4 : i64} : i32, !llvm.ptr
+ // CHECK: ptr.store volatile %{{.*}}, %[[VOLATILE_ALLOCA]]
+ ptr.store volatile %1, %5 {alignment = 4 : i64} : i32, !llvm.ptr
%8 = llvm.call @_setjmp(%2) : (!llvm.ptr) -> i32
%9 = llvm.icmp "ne" %8, %1 : i32
%10 = llvm.zext %9 : i1 to i8
%11 = llvm.icmp "ne" %10, %3 : i8
llvm.cond_br %11, ^bb1, ^bb2
^bb1: // pred: ^bb0
- // CHECK: = llvm.load volatile %[[VOLATILE_ALLOCA]]
- %12 = llvm.load volatile %5 {alignment = 4 : i64} : !llvm.ptr -> i32
- llvm.store %12, %6 {alignment = 4 : i64} : i32, !llvm.ptr
+ // CHECK: = ptr.load volatile %[[VOLATILE_ALLOCA]]
+ %12 = ptr.load volatile %5 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %12, %6 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb3
^bb2: // pred: ^bb0
- // CHECK: llvm.store volatile %{{.*}}, %[[VOLATILE_ALLOCA]]
- llvm.store volatile %0, %5 {alignment = 4 : i64} : i32, !llvm.ptr
+ // CHECK: ptr.store volatile %{{.*}}, %[[VOLATILE_ALLOCA]]
+ ptr.store volatile %0, %5 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.call @g() : () -> ()
- llvm.store %1, %6 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %1, %6 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb3
^bb3: // 2 preds: ^bb1, ^bb2
- %13 = llvm.load %6 {alignment = 4 : i64} : !llvm.ptr -> i32
- llvm.store %13, %4 {alignment = 4 : i64} : i32, !llvm.ptr
+ %13 = ptr.load %6 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %13, %4 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb4
^bb4: // pred: ^bb3
- %14 = llvm.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %14 = ptr.load %4 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.return %14 : i32
}
llvm.mlir.global external @j() {addr_space = 0 : i32} : !llvm.array<1 x struct<"struct.__jmp_buf_tag", (array<6 x i32>, i32, struct<"struct.__sigset_t", (array<32 x i32>)>)>>
@@ -306,7 +306,7 @@ llvm.func amdgpu_kernelcc @addrspace_discard() {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.mlir.constant(2 : i64) : i64
%2 = llvm.alloca %0 x i8 {alignment = 8 : i64} : (i32) -> !llvm.ptr<5>
- %3 = llvm.addrspacecast %2 : !llvm.ptr<5> to !llvm.ptr
+ %3 = ptr.addrspacecast %2 : !llvm.ptr<5> to !llvm.ptr
llvm.intr.lifetime.start 2, %3 : !llvm.ptr
llvm.return
}
@@ -319,8 +319,8 @@ llvm.func @ignore_atomic(%arg0: i32) -> i32 {
// CHECK-NOT: = llvm.alloca
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- llvm.store %arg0, %1 atomic seq_cst {alignment = 4 : i64} : i32, !llvm.ptr
- %2 = llvm.load %1 atomic seq_cst {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %arg0, %1 atomic seq_cst {alignment = 4 : i64} : i32, !llvm.ptr
+ %2 = ptr.load %1 atomic seq_cst {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: llvm.return %[[ARG0]] : i32
llvm.return %2 : i32
}
@@ -339,12 +339,12 @@ llvm.func @landing_pad() -> i32 attributes {personality = @__gxx_personality_v0}
%2 = llvm.invoke @landing_padf() to ^bb1 unwind ^bb3 : () -> i32
// CHECK: ^{{.*}}:
^bb1:// pred: ^bb0
- llvm.store %2, %1 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %2, %1 {alignment = 4 : i64} : i32, !llvm.ptr
// CHECK: llvm.br ^[[BB2:.*]](%[[V2]] : i32)
llvm.br ^bb2
// CHECK: ^[[BB2]]([[V3:.*]]: i32):
^bb2:// 2 preds: ^bb1, ^bb3
- %3 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %3 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: llvm.return [[V3]] : i32
llvm.return %3 : i32
// CHECK: ^{{.*}}:
@@ -367,12 +367,12 @@ llvm.func @unreachable_defines() -> i32 {
%1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
llvm.br ^bb1
^bb1: // 2 preds: ^bb0, ^bb2
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: llvm.return %[[UNDEF]] : i32
llvm.return %2 : i32
^bb2: // no predecessors
- %3 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
- llvm.store %3, %1 {alignment = 4 : i64} : i32, !llvm.ptr
+ %3 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %3, %1 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb1
}
@@ -387,15 +387,15 @@ llvm.func @unreachable_jumps_to_merge_point(%arg0: i1) -> i32 {
%3 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
llvm.cond_br %arg0, ^bb1, ^bb2
^bb1: // 2 preds: ^bb0, ^bb4
- llvm.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %1, %3 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb4
^bb2: // pred: ^bb0
- llvm.store %2, %3 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %2, %3 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.br ^bb4
^bb3: // no predecessors
llvm.br ^bb4
^bb4: // 3 preds: ^bb1, ^bb2, ^bb3
- %4 = llvm.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %4 = ptr.load %3 {alignment = 4 : i64} : !llvm.ptr -> i32
llvm.return %4 : i32
}
@@ -407,7 +407,7 @@ llvm.func @ignore_lifetime() {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
llvm.intr.lifetime.start 2, %1 : !llvm.ptr
- llvm.store %0, %1 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %0, %1 {alignment = 4 : i64} : i32, !llvm.ptr
llvm.intr.lifetime.end 2, %1 : !llvm.ptr
llvm.return
}
@@ -426,7 +426,7 @@ llvm.func @ignore_discardable_tree() {
%6 = llvm.alloca %0 x !llvm.struct<(i8, i16)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%7 = llvm.getelementptr %6[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i8, i16)>
llvm.intr.lifetime.start 2, %7 : !llvm.ptr
- llvm.store %5, %6 {alignment = 2 : i64} : !llvm.struct<(i8, i16)>, !llvm.ptr
+ ptr.store %5, %6 {alignment = 2 : i64} : !llvm.struct<(i8, i16)>, !llvm.ptr
llvm.intr.lifetime.end 2, %7 : !llvm.ptr
llvm.return
}
@@ -440,8 +440,8 @@ llvm.func @store_load_forward() -> i32 {
// CHECK: %[[RES:.*]] = llvm.mlir.constant(0 : i32) : i32
%1 = llvm.mlir.constant(0 : i32) : i32
%2 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- llvm.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
- %3 = llvm.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
+ ptr.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
+ %3 = ptr.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
@@ -454,8 +454,8 @@ llvm.func @store_load_wrong_type() -> i16 {
%1 = llvm.mlir.constant(0 : i32) : i32
// CHECK: = llvm.alloca
%2 = llvm.alloca %0 x i32 {alignment = 4 : i64} : (i32) -> !llvm.ptr
- llvm.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
- %3 = llvm.load %2 {alignment = 2 : i64} : !llvm.ptr -> i16
+ ptr.store %1, %2 {alignment = 4 : i64} : i32, !llvm.ptr
+ %3 = ptr.load %2 {alignment = 2 : i64} : !llvm.ptr -> i16
llvm.return %3 : i16
}
@@ -471,12 +471,12 @@ llvm.func @merge_point_cycle() {
llvm.br ^bb1
// CHECK: ^[[BB1]](%[[BARG:.*]]: i32):
^bb1: // 2 preds: ^bb0, ^bb1
- %3 = llvm.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %3 = ptr.load %2 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: = llvm.call @use(%[[BARG]])
%4 = llvm.call @use(%3) : (i32) -> i1
// CHECK: %[[DEF:.*]] = llvm.call @def
%5 = llvm.call @def(%1) : (i32) -> i32
- llvm.store %5, %2 {alignment = 4 : i64} : i32, !llvm.ptr
+ ptr.store %5, %2 {alignment = 4 : i64} : i32, !llvm.ptr
// CHECK: llvm.cond_br %{{.*}}, ^[[BB1]](%[[DEF]] : i32), ^{{.*}}
llvm.cond_br %4, ^bb1, ^bb2
^bb2: // pred: ^bb1
@@ -497,7 +497,7 @@ llvm.func @no_unnecessary_arguments() {
llvm.br ^bb1
// CHECK: ^[[BB1]]:
^bb1: // 2 preds: ^bb0, ^bb1
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i32
// CHECK: = llvm.call @use(%[[UNDEF]])
%3 = llvm.call @use(%2) : (i32) -> i1
// CHECK: llvm.cond_br %{{.*}}, ^[[BB1]], ^{{.*}}
@@ -594,15 +594,15 @@ llvm.func @live_cycle(%arg0: i64, %arg1: i1, %arg2: i64) -> i64 {
%0 = llvm.mlir.constant(1 : i32) : i32
// CHECK-NOT: = llvm.alloca
%1 = llvm.alloca %0 x i64 {alignment = 8 : i64} : (i32) -> !llvm.ptr
- llvm.store %arg2, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ ptr.store %arg2, %1 {alignment = 4 : i64} : i64, !llvm.ptr
// CHECK: llvm.cond_br %{{.*}}, ^[[BB1:.*]](%[[ARG2]] : i64), ^[[BB2:.*]](%[[ARG2]] : i64)
llvm.cond_br %arg1, ^bb1, ^bb2
// CHECK: ^[[BB1]](%[[V1:.*]]: i64):
^bb1:
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
// CHECK: llvm.call @use(%[[V1]])
llvm.call @use(%2) : (i64) -> ()
- llvm.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ ptr.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
// CHECK: llvm.br ^[[BB2]](%[[ARG0]] : i64)
llvm.br ^bb2
// CHECK: ^[[BB2]](%[[V2:.*]]: i64):
@@ -623,17 +623,17 @@ llvm.func @subregion_block_promotion(%arg0: i64, %arg1: i64) -> i64 {
%0 = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[ALLOCA:.*]] = llvm.alloca
%1 = llvm.alloca %0 x i64 {alignment = 8 : i64} : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG1]], %[[ALLOCA]]
- llvm.store %arg1, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ // CHECK: ptr.store %[[ARG1]], %[[ALLOCA]]
+ ptr.store %arg1, %1 {alignment = 4 : i64} : i64, !llvm.ptr
// CHECK: scf.execute_region {
scf.execute_region {
- // CHECK: llvm.store %[[ARG0]], %[[ALLOCA]]
- llvm.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ // CHECK: ptr.store %[[ARG0]], %[[ALLOCA]]
+ ptr.store %arg0, %1 {alignment = 4 : i64} : i64, !llvm.ptr
scf.yield
}
// CHECK: }
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
// CHECK: llvm.return %[[RES]] : i64
llvm.return %2 : i64
}
@@ -646,8 +646,8 @@ llvm.func @subregion_simple_transitive_promotion(%arg0: i64, %arg1: i64) -> i64
%0 = llvm.mlir.constant(1 : i32) : i32
// CHECK-NOT: = llvm.alloca
%1 = llvm.alloca %0 x i64 {alignment = 8 : i64} : (i32) -> !llvm.ptr
- llvm.store %arg1, %1 {alignment = 4 : i64} : i64, !llvm.ptr
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
+ ptr.store %arg1, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
// CHECK: scf.execute_region {
scf.execute_region {
// CHECK: llvm.call @use(%[[ARG1]])
@@ -677,10 +677,10 @@ llvm.func @no_inner_alloca_promotion(%arg: i64) -> i64 {
^bb1:
// CHECK: %[[ALLOCA:.*]] = llvm.alloca
%1 = llvm.alloca %0 x i64 {alignment = 8 : i64} : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %1 {alignment = 4 : i64} : i64, !llvm.ptr
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %2 = llvm.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
+ // CHECK: ptr.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 {alignment = 4 : i64} : i64, !llvm.ptr
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %2 = ptr.load %1 {alignment = 4 : i64} : !llvm.ptr -> i64
// CHECK: llvm.return %[[RES]] : i64
llvm.return %2 : i64
}
@@ -692,8 +692,8 @@ llvm.func @transitive_reaching_def() -> !llvm.ptr {
%0 = llvm.mlir.constant(1 : i32) : i32
// CHECK-NOT: alloca
%1 = llvm.alloca %0 x !llvm.ptr {alignment = 8 : i64} : (i32) -> !llvm.ptr
- %2 = llvm.load %1 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
- llvm.store %2, %1 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
- %3 = llvm.load %1 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
+ %2 = ptr.load %1 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
+ ptr.store %2, %1 {alignment = 8 : i64} : !llvm.ptr, !llvm.ptr
+ %3 = ptr.load %1 {alignment = 8 : i64} : !llvm.ptr -> !llvm.ptr
llvm.return %3 : !llvm.ptr
}
diff --git a/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir b/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir
index 373931c747fc3e..578511bfaa7c3d 100644
--- a/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir
+++ b/mlir/test/Dialect/LLVMIR/opaque-ptr.mlir
@@ -2,22 +2,22 @@
// CHECK-LABEL: @opaque_ptr_load
llvm.func @opaque_ptr_load(%arg0: !llvm.ptr) -> i32 {
- // CHECK: = llvm.load %{{.*}} : !llvm.ptr -> i32
- %0 = llvm.load %arg0 : !llvm.ptr -> i32
+ // CHECK: = ptr.load %{{.*}} : !llvm.ptr -> i32
+ %0 = ptr.load %arg0 : !llvm.ptr -> i32
llvm.return %0 : i32
}
// CHECK-LABEL: @opaque_ptr_store
llvm.func @opaque_ptr_store(%arg0: i32, %arg1: !llvm.ptr){
- // CHECK: llvm.store %{{.*}}, %{{.*}} : i32, !llvm.ptr
- llvm.store %arg0, %arg1 : i32, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} : i32, !llvm.ptr
+ ptr.store %arg0, %arg1 : i32, !llvm.ptr
llvm.return
}
// CHECK-LABEL: @opaque_ptr_ptr_store
llvm.func @opaque_ptr_ptr_store(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
- // CHECK: llvm.store %{{.*}}, %{{.*}} : !llvm.ptr, !llvm.ptr
- llvm.store %arg0, %arg1 : !llvm.ptr, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} : !llvm.ptr, !llvm.ptr
+ ptr.store %arg0, %arg1 : !llvm.ptr, !llvm.ptr
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/roundtrip.mlir b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
index 49f34785ebad52..aa293313ae9976 100644
--- a/mlir/test/Dialect/LLVMIR/roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/roundtrip.mlir
@@ -61,12 +61,12 @@ func.func @ops(%arg0: i32, %arg1: f32,
//
// CHECK-NEXT: %[[ALLOCA:.*]] = llvm.alloca %[[I32]] x f64 : (i32) -> !llvm.ptr
// CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][%[[I32]]] : (!llvm.ptr, i32) -> !llvm.ptr, f64
-// CHECK-NEXT: %[[VALUE:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> f64
-// CHECK-NEXT: llvm.store %[[VALUE]], %[[ALLOCA]] : f64, !llvm.ptr
+// CHECK-NEXT: %[[VALUE:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> f64
+// CHECK-NEXT: ptr.store %[[VALUE]], %[[ALLOCA]] : f64, !llvm.ptr
%13 = llvm.alloca %arg0 x f64 : (i32) -> !llvm.ptr
%14 = llvm.getelementptr %13[%arg0] : (!llvm.ptr, i32) -> !llvm.ptr, f64
- %15 = llvm.load %14 : !llvm.ptr -> f64
- llvm.store %15, %13 : f64, !llvm.ptr
+ %15 = ptr.load %14 : !llvm.ptr -> f64
+ ptr.store %15, %13 : f64, !llvm.ptr
// Function call-related operations.
//
@@ -150,10 +150,10 @@ func.func @ops(%arg0: i32, %arg1: f32,
// Integer to pointer and pointer to integer conversions.
//
-// CHECK: %[[PTR:.*]] = llvm.inttoptr %[[I32]] : i32 to !llvm.ptr
-// CHECK: %{{.*}} = llvm.ptrtoint %[[PTR]] : !llvm.ptr to i32
- %25 = llvm.inttoptr %arg0 : i32 to !llvm.ptr
- %26 = llvm.ptrtoint %25 : !llvm.ptr to i32
+// CHECK: %[[PTR:.*]] = ptr.inttoptr %[[I32]] : i32 to !llvm.ptr
+// CHECK: %{{.*}} = ptr.ptrtoint %[[PTR]] : !llvm.ptr to i32
+ %25 = ptr.inttoptr %arg0 : i32 to !llvm.ptr
+ %26 = ptr.ptrtoint %25 : !llvm.ptr to i32
// Extended and Quad floating point
//
@@ -290,8 +290,8 @@ func.func @casts(%arg0: i32, %arg1: i64, %arg2: vector<4xi32>,
%8 = llvm.fptosi %7 : f32 to i32
// CHECK: = llvm.fptoui %[[FLOAT]] : f32 to i32
%9 = llvm.fptoui %7 : f32 to i32
-// CHECK: = llvm.addrspacecast %[[PTR]] : !llvm.ptr to !llvm.ptr<2>
- %10 = llvm.addrspacecast %arg4 : !llvm.ptr to !llvm.ptr<2>
+// CHECK: = ptr.addrspacecast %[[PTR]] : !llvm.ptr to !llvm.ptr<2>
+ %10 = ptr.addrspacecast %arg4 : !llvm.ptr to !llvm.ptr<2>
// CHECK: = llvm.bitcast %[[I64]] : i64 to f64
%11 = llvm.bitcast %arg1 : i64 to f64
llvm.return
@@ -367,28 +367,28 @@ func.func @zero() {
// CHECK-LABEL: @atomic_load
func.func @atomic_load(%ptr : !llvm.ptr) {
- // CHECK: llvm.load %{{.*}} atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
- %0 = llvm.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
- // CHECK: llvm.load volatile %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
- %1 = llvm.load volatile %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
+ // CHECK: ptr.load %{{.*}} atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+ %0 = ptr.load %ptr atomic monotonic {alignment = 4 : i64} : !llvm.ptr -> f32
+ // CHECK: ptr.load volatile %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
+ %1 = ptr.load volatile %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr -> f32
llvm.return
}
// CHECK-LABEL: @atomic_store
func.func @atomic_store(%val : f32, %ptr : !llvm.ptr) {
- // CHECK: llvm.store %{{.*}}, %{{.*}} atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
- llvm.store %val, %ptr atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
- // CHECK: llvm.store volatile %{{.*}}, %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
- llvm.store volatile %val, %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %{{.*}} atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %val, %ptr atomic monotonic {alignment = 4 : i64} : f32, !llvm.ptr
+ // CHECK: ptr.store volatile %{{.*}}, %{{.*}} atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
+ ptr.store volatile %val, %ptr atomic syncscope("singlethread") monotonic {alignment = 16 : i64} : f32, !llvm.ptr
llvm.return
}
// CHECK-LABEL: @atomicrmw
func.func @atomicrmw(%ptr : !llvm.ptr, %val : f32) {
- // CHECK: llvm.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
- %0 = llvm.atomicrmw fadd %ptr, %val monotonic : !llvm.ptr, f32
- // CHECK: llvm.atomicrmw volatile fsub %{{.*}}, %{{.*}} syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr, f32
- %1 = llvm.atomicrmw volatile fsub %ptr, %val syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr, f32
+ // CHECK: ptr.atomicrmw fadd %{{.*}}, %{{.*}} monotonic : !llvm.ptr, f32
+ %0 = ptr.atomicrmw fadd %ptr, %val monotonic : !llvm.ptr, f32
+ // CHECK: ptr.atomicrmw volatile fsub %{{.*}}, %{{.*}} syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr, f32
+ %1 = ptr.atomicrmw volatile fsub %ptr, %val syncscope("singlethread") monotonic {alignment = 16 : i64} : !llvm.ptr, f32
llvm.return
}
diff --git a/mlir/test/Dialect/LLVMIR/sroa-intrinsics.mlir b/mlir/test/Dialect/LLVMIR/sroa-intrinsics.mlir
index c2e3458134ba4b..1e6b84443c5062 100644
--- a/mlir/test/Dialect/LLVMIR/sroa-intrinsics.mlir
+++ b/mlir/test/Dialect/LLVMIR/sroa-intrinsics.mlir
@@ -15,7 +15,7 @@ llvm.func @memset() -> i32 {
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -37,7 +37,7 @@ llvm.func @memset_partial() -> i32 {
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -58,7 +58,7 @@ llvm.func @memset_full() -> i32 {
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -78,7 +78,7 @@ llvm.func @memset_too_much() -> i32 {
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -97,7 +97,7 @@ llvm.func @memset_no_volatile() -> i32 {
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = true}>
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = true}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -117,7 +117,7 @@ llvm.func @indirect_memset() -> i32 {
%2 = llvm.getelementptr %1[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
// CHECK: "llvm.intr.memset"(%[[ALLOCA]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%2, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -138,7 +138,7 @@ llvm.func @invalid_indirect_memset() -> i32 {
%2 = llvm.getelementptr %1[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
// CHECK: "llvm.intr.memset"(%[[GEP]], %[[MEMSET_VALUE]], %[[MEMSET_LEN]]) <{isVolatile = false}>
"llvm.intr.memset"(%2, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -164,9 +164,9 @@ llvm.func @memset_double_use() -> i32 {
// CHECK-NOT: "llvm.intr.memset"
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f32)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
%4 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f32)>
- %5 = llvm.load %4 : !llvm.ptr -> f32
+ %5 = ptr.load %4 : !llvm.ptr -> f32
// We use this exotic bitcast to use the f32 easily. Semantics do not matter here.
%6 = llvm.bitcast %5 : f32 to i32
%7 = llvm.add %3, %6 : i32
@@ -195,9 +195,9 @@ llvm.func @memset_considers_alignment() -> i32 {
// CHECK-NOT: "llvm.intr.memset"
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i8, i32, f32)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
%4 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i8, i32, f32)>
- %5 = llvm.load %4 : !llvm.ptr -> f32
+ %5 = ptr.load %4 : !llvm.ptr -> f32
// We use this exotic bitcast to use the f32 easily. Semantics do not matter here.
%6 = llvm.bitcast %5 : f32 to i32
%7 = llvm.add %3, %6 : i32
@@ -227,9 +227,9 @@ llvm.func @memset_considers_packing() -> i32 {
// CHECK-NOT: "llvm.intr.memset"
"llvm.intr.memset"(%1, %memset_value, %memset_len) <{isVolatile = false}> : (!llvm.ptr, i8, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i8, i32, f32)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
%4 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i8, i32, f32)>
- %5 = llvm.load %4 : !llvm.ptr -> f32
+ %5 = ptr.load %4 : !llvm.ptr -> f32
// We use this exotic bitcast to use the f32 easily. Semantics do not matter here.
%6 = llvm.bitcast %5 : f32 to i32
%7 = llvm.add %3, %6 : i32
@@ -252,7 +252,7 @@ llvm.func @memcpy_dest(%other_array: !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[SLOT_IN_OTHER]], %[[MEMCPY_LEN]]) <{isVolatile = false}>
"llvm.intr.memcpy"(%1, %other_array, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -281,7 +281,7 @@ llvm.func @memcpy_src(%other_array: !llvm.ptr) -> i32 {
// CHECK-DAG: "llvm.intr.memcpy"(%[[SLOT_IN_OTHER]], %{{.*}}, %[[MEMCPY_LEN]]) <{isVolatile = false}>
"llvm.intr.memcpy"(%other_array, %1, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<4 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -301,7 +301,7 @@ llvm.func @memcpy_double() -> i32 {
// CHECK-NOT: "llvm.intr.memcpy"
"llvm.intr.memcpy"(%1, %2, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
%3 = llvm.getelementptr %1[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<1 x i32>
- %4 = llvm.load %3 : !llvm.ptr -> i32
+ %4 = ptr.load %3 : !llvm.ptr -> i32
llvm.return %4 : i32
}
@@ -319,7 +319,7 @@ llvm.func @memcpy_no_partial(%other_array: !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[OTHER_ARRAY]], %[[MEMCPY_LEN]]) <{isVolatile = false}>
"llvm.intr.memcpy"(%1, %other_array, %memcpy_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -337,7 +337,7 @@ llvm.func @memcpy_no_volatile(%other_array: !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memcpy"(%[[ALLOCA]], %[[OTHER_ARRAY]], %[[MEMCPY_LEN]]) <{isVolatile = true}>
"llvm.intr.memcpy"(%1, %other_array, %memcpy_len) <{isVolatile = true}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -357,7 +357,7 @@ llvm.func @memmove_dest(%other_array: !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memmove"(%[[ALLOCA]], %[[SLOT_IN_OTHER]], %[[MEMMOVE_LEN]]) <{isVolatile = false}>
"llvm.intr.memmove"(%1, %other_array, %memmove_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -386,7 +386,7 @@ llvm.func @memmove_src(%other_array: !llvm.ptr) -> i32 {
// CHECK-DAG: "llvm.intr.memmove"(%[[SLOT_IN_OTHER]], %{{.*}}, %[[MEMMOVE_LEN]]) <{isVolatile = false}>
"llvm.intr.memmove"(%other_array, %1, %memmove_len) <{isVolatile = false}> : (!llvm.ptr, !llvm.ptr, i32) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<4 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -404,7 +404,7 @@ llvm.func @memcpy_inline_dest(%other_array: !llvm.ptr) -> i32 {
// CHECK: "llvm.intr.memcpy.inline"(%[[ALLOCA]], %[[SLOT_IN_OTHER]]) <{isVolatile = false, len = 4 : i32}>
"llvm.intr.memcpy.inline"(%1, %other_array) <{isVolatile = false, len = 40 : i32}> : (!llvm.ptr, !llvm.ptr) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -431,6 +431,6 @@ llvm.func @memcpy_inline_src(%other_array: !llvm.ptr) -> i32 {
// CHECK-DAG: "llvm.intr.memcpy.inline"(%[[SLOT_IN_OTHER]], %{{.*}}) <{isVolatile = false, len = 4 : i32}>
"llvm.intr.memcpy.inline"(%other_array, %1) <{isVolatile = false, len = 16 : i32}> : (!llvm.ptr, !llvm.ptr) -> ()
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<4 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
diff --git a/mlir/test/Dialect/LLVMIR/sroa-statistics.mlir b/mlir/test/Dialect/LLVMIR/sroa-statistics.mlir
index 2905e8745ae029..25c44a79704d12 100644
--- a/mlir/test/Dialect/LLVMIR/sroa-statistics.mlir
+++ b/mlir/test/Dialect/LLVMIR/sroa-statistics.mlir
@@ -9,7 +9,7 @@ llvm.func @basic() -> i32 {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -24,8 +24,8 @@ llvm.func @basic_no_memory_benefit() -> i32 {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
%3 = llvm.getelementptr inbounds %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
- %4 = llvm.load %2 : !llvm.ptr -> i32
- %5 = llvm.load %3 : !llvm.ptr -> i32
+ %4 = ptr.load %2 : !llvm.ptr -> i32
+ %5 = ptr.load %3 : !llvm.ptr -> i32
%6 = llvm.add %4, %5 : i32
llvm.return %6 : i32
}
@@ -40,7 +40,7 @@ llvm.func @basic_array() -> i32 {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -57,6 +57,6 @@ llvm.func @multi_level_direct() -> i32 {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 2, 1, 5, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
diff --git a/mlir/test/Dialect/LLVMIR/sroa.mlir b/mlir/test/Dialect/LLVMIR/sroa.mlir
index f56ea53ac029e7..4af50dad8c4930 100644
--- a/mlir/test/Dialect/LLVMIR/sroa.mlir
+++ b/mlir/test/Dialect/LLVMIR/sroa.mlir
@@ -7,8 +7,8 @@ llvm.func @basic_struct() -> i32 {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %3 = ptr.load %2 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
@@ -22,8 +22,8 @@ llvm.func @basic_array() -> i32 {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
%1 = llvm.alloca %0 x !llvm.array<10 x i32> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %3 = ptr.load %2 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
@@ -37,8 +37,8 @@ llvm.func @multi_level_direct() -> i32 {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 2, 1, 5, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)>
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %3 = ptr.load %2 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
@@ -58,8 +58,8 @@ llvm.func @multi_level_direct_two_applications() -> i32 {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, array<10 x i32>, i8)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 2, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, array<10 x i32>, i8)>
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %3 = ptr.load %2 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
@@ -74,8 +74,8 @@ llvm.func @multi_level_indirect() -> i32 {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr inbounds %1[0, 2, 1, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, struct<"bar", (i8, array<10 x array<10 x i32>>, i8)>)>
%3 = llvm.getelementptr inbounds %2[0, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<10 x i32>
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %4 = llvm.load %3 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %4 = ptr.load %3 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %4 : i32
}
@@ -91,10 +91,10 @@ llvm.func @resolve_alias(%arg: i32) -> i32 {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
%3 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %2 : i32, !llvm.ptr
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %4 = llvm.load %3 : !llvm.ptr -> i32
+ // CHECK: ptr.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %2 : i32, !llvm.ptr
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %4 = ptr.load %3 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %4 : i32
}
@@ -109,7 +109,7 @@ llvm.func @no_non_single_support() -> i32 {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
// CHECK-NOT: = llvm.alloca
%2 = llvm.getelementptr inbounds %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -123,7 +123,7 @@ llvm.func @no_pointer_indexing() -> i32 {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
// CHECK-NOT: = llvm.alloca
%2 = llvm.getelementptr %1[1, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.return %3 : i32
}
@@ -137,7 +137,7 @@ llvm.func @no_direct_use() -> i32 {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
// CHECK-NOT: = llvm.alloca
%2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ %3 = ptr.load %2 : !llvm.ptr -> i32
llvm.call @use(%1) : (!llvm.ptr) -> ()
llvm.return %3 : i32
}
@@ -153,8 +153,8 @@ llvm.func @direct_promotable_use_is_fine() -> i32 {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[SIZE]] x i32
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, f64, i32)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
%2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %3 = ptr.load %2 : !llvm.ptr -> i32
// This is a direct use of the slot but it can be removed because it implements PromotableOpInterface.
llvm.intr.lifetime.start 2, %1 : !llvm.ptr
// CHECK: llvm.return %[[RES]] : i32
@@ -172,8 +172,8 @@ llvm.func @direct_promotable_use_is_fine_on_accessor() -> i32 {
%2 = llvm.getelementptr %1[0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, f64, i32)>
// This does not provide side-effect info but it can be removed because it implements PromotableOpInterface.
%3 = llvm.intr.invariant.start 2, %2 : !llvm.ptr
- // CHECK: %[[RES:.*]] = llvm.load %[[ALLOCA]]
- %4 = llvm.load %2 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[ALLOCA]]
+ %4 = ptr.load %2 : !llvm.ptr -> i32
// This does not provide side-effect info but it can be removed because it implements PromotableOpInterface.
llvm.intr.invariant.end %3, 2, %2 : !llvm.ptr
// CHECK: llvm.return %[[RES]] : i32
@@ -192,8 +192,8 @@ llvm.func @no_dynamic_indexing(%arg: i32) -> i32 {
// CHECK-NOT: = llvm.alloca
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, %[[ARG]]]
%2 = llvm.getelementptr %1[0, %arg] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32>
- // CHECK: %[[RES:.*]] = llvm.load %[[GEP]]
- %3 = llvm.load %2 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[GEP]]
+ %3 = ptr.load %2 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %3 : i32
}
diff --git a/mlir/test/Dialect/LLVMIR/tbaa-roundtrip.mlir b/mlir/test/Dialect/LLVMIR/tbaa-roundtrip.mlir
index 472ae609004c00..5ce4f66eca4b8c 100644
--- a/mlir/test/Dialect/LLVMIR/tbaa-roundtrip.mlir
+++ b/mlir/test/Dialect/LLVMIR/tbaa-roundtrip.mlir
@@ -35,15 +35,15 @@
llvm.func @tbaa1(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
%0 = llvm.mlir.constant(1 : i8) : i8
- llvm.store %0, %arg0 {tbaa = [#tbaa_tag_0]} : i8, !llvm.ptr
- llvm.store %0, %arg1 {tbaa = [#tbaa_tag_1]} : i8, !llvm.ptr
+ ptr.store %0, %arg0 {tbaa = [#tbaa_tag_0]} : i8, !llvm.ptr
+ ptr.store %0, %arg1 {tbaa = [#tbaa_tag_1]} : i8, !llvm.ptr
llvm.return
}
// CHECK: llvm.func @tbaa1(%[[VAL_0:.*]]: !llvm.ptr, %[[VAL_1:.*]]: !llvm.ptr) {
// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(1 : i8) : i8
-// CHECK: llvm.store %[[VAL_2]], %[[VAL_0]] {tbaa = [#[[$TAG_0]]]} : i8, !llvm.ptr
-// CHECK: llvm.store %[[VAL_2]], %[[VAL_1]] {tbaa = [#[[$TAG_1]]]} : i8, !llvm.ptr
+// CHECK: ptr.store %[[VAL_2]], %[[VAL_0]] {tbaa = [#[[$TAG_0]]]} : i8, !llvm.ptr
+// CHECK: ptr.store %[[VAL_2]], %[[VAL_1]] {tbaa = [#[[$TAG_1]]]} : i8, !llvm.ptr
// CHECK: llvm.return
// CHECK: }
@@ -51,10 +51,10 @@ llvm.func @tbaa2(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
%0 = llvm.mlir.constant(0 : i32) : i32
%1 = llvm.mlir.constant(1 : i32) : i32
%2 = llvm.getelementptr inbounds %arg1[%0, 1] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg2_t", (i64, i64)>
- %3 = llvm.load %2 {tbaa = [#tbaa_tag_2]} : !llvm.ptr -> i64
+ %3 = ptr.load %2 {tbaa = [#tbaa_tag_2]} : !llvm.ptr -> i64
%4 = llvm.trunc %3 : i64 to i32
%5 = llvm.getelementptr inbounds %arg0[%0, 0] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg1_t", (i32, i32)>
- llvm.store %4, %5 {tbaa = [#tbaa_tag_3]} : i32, !llvm.ptr
+ ptr.store %4, %5 {tbaa = [#tbaa_tag_3]} : i32, !llvm.ptr
llvm.return
}
@@ -62,33 +62,33 @@ llvm.func @tbaa2(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[VAL_4:.*]] = llvm.getelementptr inbounds %[[VAL_1]]{{\[}}%[[VAL_2]], 1] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg2_t", (i64, i64)>
-// CHECK: %[[VAL_5:.*]] = llvm.load %[[VAL_4]] {tbaa = [#[[$TAG_2]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_5:.*]] = ptr.load %[[VAL_4]] {tbaa = [#[[$TAG_2]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_6:.*]] = llvm.trunc %[[VAL_5]] : i64 to i32
// CHECK: %[[VAL_7:.*]] = llvm.getelementptr inbounds %[[VAL_0]]{{\[}}%[[VAL_2]], 0] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg1_t", (i32, i32)>
-// CHECK: llvm.store %[[VAL_6]], %[[VAL_7]] {tbaa = [#[[$TAG_3]]]} : i32, !llvm.ptr
+// CHECK: ptr.store %[[VAL_6]], %[[VAL_7]] {tbaa = [#[[$TAG_3]]]} : i32, !llvm.ptr
// CHECK: llvm.return
// CHECK: }
llvm.func @tbaa3(%arg0: !llvm.ptr) {
%0 = llvm.mlir.constant(1 : i8) : i8
- llvm.store %0, %arg0 {tbaa = [#tbaa_tag_0, #tbaa_tag_1]} : i8, !llvm.ptr
+ ptr.store %0, %arg0 {tbaa = [#tbaa_tag_0, #tbaa_tag_1]} : i8, !llvm.ptr
llvm.return
}
// CHECK: llvm.func @tbaa3(%[[VAL_0:.*]]: !llvm.ptr) {
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i8) : i8
-// CHECK: llvm.store %[[VAL_1]], %[[VAL_0]] {tbaa = [#[[$TAG_0]], #[[$TAG_1]]]} : i8, !llvm.ptr
+// CHECK: ptr.store %[[VAL_1]], %[[VAL_0]] {tbaa = [#[[$TAG_0]], #[[$TAG_1]]]} : i8, !llvm.ptr
// CHECK: llvm.return
// CHECK: }
llvm.func @tbaa4(%arg0: !llvm.ptr) {
%0 = llvm.mlir.constant(1 : i8) : i8
- llvm.store %0, %arg0 {tbaa = [#tbaa_tag_4]} : i8, !llvm.ptr
+ ptr.store %0, %arg0 {tbaa = [#tbaa_tag_4]} : i8, !llvm.ptr
llvm.return
}
// CHECK: llvm.func @tbaa4(%[[VAL_0:.*]]: !llvm.ptr) {
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i8) : i8
-// CHECK: llvm.store %[[VAL_1]], %[[VAL_0]] {tbaa = [#[[$TAG_4]]]} : i8, !llvm.ptr
+// CHECK: ptr.store %[[VAL_1]], %[[VAL_0]] {tbaa = [#[[$TAG_4]]]} : i8, !llvm.ptr
// CHECK: llvm.return
// CHECK: }
diff --git a/mlir/test/Dialect/LLVMIR/type-consistency.mlir b/mlir/test/Dialect/LLVMIR/type-consistency.mlir
index 3a1ab924ebdacb..3a0dcc11e08478 100644
--- a/mlir/test/Dialect/LLVMIR/type-consistency.mlir
+++ b/mlir/test/Dialect/LLVMIR/type-consistency.mlir
@@ -7,7 +7,7 @@ llvm.func @same_address(%arg: i32) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
// CHECK: = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
%7 = llvm.getelementptr %1[8] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
+ ptr.store %arg, %7 : i32, !llvm.ptr
llvm.return
}
@@ -20,7 +20,7 @@ llvm.func @same_address_keep_inbounds(%arg: i32) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
// CHECK: = llvm.getelementptr inbounds %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
%7 = llvm.getelementptr inbounds %1[8] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
+ ptr.store %arg, %7 : i32, !llvm.ptr
llvm.return
}
@@ -32,8 +32,8 @@ llvm.func @struct_store_instead_of_first_field(%arg: i32) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]] : i32
- llvm.store %arg, %1 : i32, !llvm.ptr
+ // CHECK: ptr.store %{{.*}}, %[[GEP]] : i32
+ ptr.store %arg, %1 : i32, !llvm.ptr
llvm.return
}
@@ -47,8 +47,8 @@ llvm.func @struct_store_instead_of_first_field_same_size(%arg: f32) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
// CHECK-DAG: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
// CHECK-DAG: %[[BITCAST:.*]] = llvm.bitcast %[[ARG]] : f32 to i32
- // CHECK: llvm.store %[[BITCAST]], %[[GEP]] : i32
- llvm.store %arg, %1 : f32, !llvm.ptr
+ // CHECK: ptr.store %[[BITCAST]], %[[GEP]] : i32
+ ptr.store %arg, %1 : f32, !llvm.ptr
llvm.return
}
@@ -60,8 +60,8 @@ llvm.func @struct_load_instead_of_first_field() -> i32 {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- // CHECK: %[[RES:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
- %2 = llvm.load %1 : !llvm.ptr -> i32
+ // CHECK: %[[RES:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> i32
+ %2 = ptr.load %1 : !llvm.ptr -> i32
// CHECK: llvm.return %[[RES]] : i32
llvm.return %2 : i32
}
@@ -74,9 +74,9 @@ llvm.func @struct_load_instead_of_first_field_same_size() -> f32 {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32)>
- // CHECK: %[[LOADED:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+ // CHECK: %[[LOADED:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> i32
// CHECK: %[[RES:.*]] = llvm.bitcast %[[LOADED]] : i32 to f32
- %2 = llvm.load %1 : !llvm.ptr -> f32
+ %2 = ptr.load %1 : !llvm.ptr -> f32
// CHECK: llvm.return %[[RES]] : f32
llvm.return %2 : f32
}
@@ -90,7 +90,7 @@ llvm.func @index_in_final_padding(%arg: i32) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i8)> : (i32) -> !llvm.ptr
// CHECK: = llvm.getelementptr %[[ALLOCA]][7] : (!llvm.ptr) -> !llvm.ptr, i8
%7 = llvm.getelementptr %1[7] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
+ ptr.store %arg, %7 : i32, !llvm.ptr
llvm.return
}
@@ -103,7 +103,7 @@ llvm.func @index_out_of_bounds(%arg: i32) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32)> : (i32) -> !llvm.ptr
// CHECK: = llvm.getelementptr %[[ALLOCA]][9] : (!llvm.ptr) -> !llvm.ptr, i8
%7 = llvm.getelementptr %1[9] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i32, !llvm.ptr
+ ptr.store %arg, %7 : i32, !llvm.ptr
llvm.return
}
@@ -116,7 +116,7 @@ llvm.func @index_in_padding(%arg: i16) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, i32)> : (i32) -> !llvm.ptr
// CHECK: = llvm.getelementptr %[[ALLOCA]][2] : (!llvm.ptr) -> !llvm.ptr, i8
%7 = llvm.getelementptr %1[2] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i16, !llvm.ptr
+ ptr.store %arg, %7 : i16, !llvm.ptr
llvm.return
}
@@ -129,7 +129,7 @@ llvm.func @index_not_in_padding_because_packed(%arg: i16) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", packed (i16, i32)> : (i32) -> !llvm.ptr
// CHECK: = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32)>
%7 = llvm.getelementptr %1[2] : (!llvm.ptr) -> !llvm.ptr, i8
- llvm.store %arg, %7 : i16, !llvm.ptr
+ ptr.store %arg, %7 : i16, !llvm.ptr
llvm.return
}
@@ -144,8 +144,8 @@ llvm.func @index_to_struct(%arg: i32) {
// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, struct<"bar", (i32, i32)>)>
// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"bar", (i32, i32)>
%7 = llvm.getelementptr %1[4] : (!llvm.ptr) -> !llvm.ptr, i8
- // CHECK: llvm.store %[[ARG]], %[[GEP1]]
- llvm.store %arg, %7 : i32, !llvm.ptr
+ // CHECK: ptr.store %[[ARG]], %[[GEP1]]
+ ptr.store %arg, %7 : i32, !llvm.ptr
llvm.return
}
@@ -159,7 +159,7 @@ llvm.func @no_crash_on_negative_gep_index() {
%2 = llvm.alloca %1 x !llvm.struct<"foo", (i32, i32, i32)> : (i32) -> !llvm.ptr
// CHECK: llvm.getelementptr %[[ALLOCA]][-1] : (!llvm.ptr) -> !llvm.ptr, f32
%3 = llvm.getelementptr %2[-1] : (!llvm.ptr) -> !llvm.ptr, f32
- llvm.store %0, %3 : f16, !llvm.ptr
+ ptr.store %0, %3 : f16, !llvm.ptr
llvm.return
}
@@ -178,13 +178,13 @@ llvm.func @coalesced_store_ints(%arg: i64) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -203,13 +203,13 @@ llvm.func @coalesced_store_ints_offset(%arg: i64) {
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
+ ptr.store %arg, %3 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -229,14 +229,14 @@ llvm.func @coalesced_store_floats(%arg: i64) {
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[BIT_CAST:.*]] = llvm.bitcast %[[TRUNC]] : i32 to f32
- // CHECK: llvm.store %[[BIT_CAST]], %[[GEP]]
+ // CHECK: ptr.store %[[BIT_CAST]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (f32, f32)>
// CHECK: %[[BIT_CAST:.*]] = llvm.bitcast %[[TRUNC]] : i32 to f32
- // CHECK: llvm.store %[[BIT_CAST]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[BIT_CAST]], %[[GEP]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -251,8 +251,8 @@ llvm.func @coalesced_store_padding_inbetween(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i16, i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i16, i32)> : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %1 : i64, !llvm.ptr
+ // CHECK: ptr.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
llvm.return
}
@@ -267,8 +267,8 @@ llvm.func @coalesced_store_padding_end(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i16)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i16)> : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %1 : i64, !llvm.ptr
+ // CHECK: ptr.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
llvm.return
}
@@ -281,8 +281,8 @@ llvm.func @coalesced_store_past_end(%arg: i64) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32)> : (i32) -> !llvm.ptr
- // CHECK: llvm.store %[[ARG]], %[[ALLOCA]]
- llvm.store %arg, %1 : i64, !llvm.ptr
+ // CHECK: ptr.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
llvm.return
}
@@ -301,17 +301,17 @@ llvm.func @coalesced_store_packed_struct(%arg: i64) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32, i16)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32, i16)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST48]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", packed (i16, i32, i16)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -330,22 +330,22 @@ llvm.func @vector_write_split(%arg: vector<4xi32>) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32] : vector<4xi32>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST2]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST3]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
- llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 : vector<4xi32>, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -365,22 +365,22 @@ llvm.func @vector_write_split_offset(%arg: vector<4xi32>) {
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST2]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST3]] : i32] : vector<4xi32>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64, i32, i32, i32, i32)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]] : i32, !llvm.ptr
- llvm.store %arg, %2 : vector<4xi32>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %2 : vector<4xi32>, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -396,10 +396,10 @@ llvm.func @vector_write_split_struct(%arg: vector<2xi64>) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (i32, i32, i32, i32)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, i32, i32)> : (i32) -> !llvm.ptr
- // CHECK-COUNT-4: llvm.store %{{.*}}, %{{.*}} : i32, !llvm.ptr
+ // CHECK-COUNT-4: ptr.store %{{.*}}, %{{.*}} : i32, !llvm.ptr
- llvm.store %arg, %1 : vector<2xi64>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 : vector<2xi64>, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -412,8 +412,8 @@ llvm.func @type_consistent_vector_store(%arg: vector<4xi32>) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x !llvm.struct<"foo", (vector<4xi32>)>
%1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xi32>)> : (i32) -> !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (vector<4xi32>)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
+ // CHECK: ptr.store %[[ARG]], %[[GEP]]
+ ptr.store %arg, %1 : vector<4xi32>, !llvm.ptr
llvm.return
}
@@ -427,9 +427,9 @@ llvm.func @type_consistent_vector_store_other_type(%arg: vector<4xi32>) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (vector<4xf32>)> : (i32) -> !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (vector<4xf32>)>
// CHECK: %[[BIT_CAST:.*]] = llvm.bitcast %[[ARG]] : vector<4xi32> to vector<4xf32>
- // CHECK: llvm.store %[[BIT_CAST]], %[[GEP]]
- llvm.store %arg, %1 : vector<4xi32>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[BIT_CAST]], %[[GEP]]
+ ptr.store %arg, %1 : vector<4xi32>, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -442,9 +442,9 @@ llvm.func @bitcast_insertion(%arg: i32) {
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %{{.*}} x f32
%1 = llvm.alloca %0 x f32 : (i32) -> !llvm.ptr
// CHECK: %[[BIT_CAST:.*]] = llvm.bitcast %[[ARG]] : i32 to f32
- // CHECK: llvm.store %[[BIT_CAST]], %[[ALLOCA]]
- llvm.store %arg, %1 : i32, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[BIT_CAST]], %[[ALLOCA]]
+ ptr.store %arg, %1 : i32, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -459,9 +459,9 @@ llvm.func @gep_split(%arg: i64) {
%3 = llvm.getelementptr %1[0, 1, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
// CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[ARG]], %[[GEP]]
+ ptr.store %arg, %3 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -481,13 +481,13 @@ llvm.func @coalesced_store_ints_subaggregate(%arg: i64) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, i32)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i32, i32)>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
+ ptr.store %arg, %3 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -502,9 +502,9 @@ llvm.func @gep_result_ptr_type_dynamic(%arg: i64) {
%3 = llvm.getelementptr %1[0, %arg, 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
// CHECK: %[[TOP_GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, %[[ARG]]] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<2 x struct<"foo", (i64)>>
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i64)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %3 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[ARG]], %[[GEP]]
+ ptr.store %arg, %3 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -523,7 +523,7 @@ llvm.func @overlapping_int_aggregate_store(%arg: i64) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]] : i64
// CHECK: [[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i48
@@ -532,14 +532,14 @@ llvm.func @overlapping_int_aggregate_store(%arg: i64) {
// Normal integer splitting of [[TRUNC]] follows:
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: ptr.store %{{.*}}, %[[GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: ptr.store %{{.*}}, %[[GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: ptr.store %{{.*}}, %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -559,25 +559,25 @@ llvm.func @overlapping_vector_aggregate_store(%arg: vector<4 x i16>) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST0]] : i32]
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP]]
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP]]
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST1]] : i32]
// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP1]]
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP1]]
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST2]] : i32]
// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP1]]
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP1]]
// CHECK: %[[EXTRACT:.*]] = llvm.extractelement %[[ARG]][%[[CST3]] : i32]
// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16)>)>
// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[GEP0]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16)>
- // CHECK: llvm.store %[[EXTRACT]], %[[GEP1]]
+ // CHECK: ptr.store %[[EXTRACT]], %[[GEP1]]
- llvm.store %arg, %1 : vector<4 x i16>, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ ptr.store %arg, %1 : vector<4 x i16>, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
@@ -596,7 +596,7 @@ llvm.func @partially_overlapping_aggregate_store(%arg: i64) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i16, struct<(i16, i16, i16, i16)>)>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i16
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST16]] : i64
// CHECK: [[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i48
@@ -605,18 +605,18 @@ llvm.func @partially_overlapping_aggregate_store(%arg: i64) {
// Normal integer splitting of [[TRUNC]] follows:
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: ptr.store %{{.*}}, %[[GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: ptr.store %{{.*}}, %[[GEP]]
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[TOP_GEP]][0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(i16, i16, i16, i16)>
- // CHECK: llvm.store %{{.*}}, %[[GEP]]
+ // CHECK: ptr.store %{{.*}}, %[[GEP]]
// It is important that there are no more stores at this point.
// Specifically a store into the fourth field of %[[TOP_GEP]] would
// incorrectly change the semantics of the code.
- // CHECK-NOT: llvm.store %{{.*}}, %{{.*}}
+ // CHECK-NOT: ptr.store %{{.*}}, %{{.*}}
- llvm.store %arg, %1 : i64, !llvm.ptr
+ ptr.store %arg, %1 : i64, !llvm.ptr
llvm.return
}
@@ -633,8 +633,8 @@ llvm.func @undesirable_overlapping_aggregate_store(%arg: i64) {
%1 = llvm.alloca %0 x !llvm.struct<"foo", (i32, i32, struct<(i64, i16, i16, i16)>)> : (i32) -> !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, struct<(i64, i16, i16, i16)>)>
%2 = llvm.getelementptr %1[0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"foo", (i32, i32, struct<(i64, i16, i16, i16)>)>
- // CHECK: llvm.store %[[ARG]], %[[GEP]]
- llvm.store %arg, %2 : i64, !llvm.ptr
+ // CHECK: ptr.store %[[ARG]], %[[GEP]]
+ ptr.store %arg, %2 : i64, !llvm.ptr
llvm.return
}
@@ -654,12 +654,12 @@ llvm.func @coalesced_store_ints_array(%arg: i64) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x i32>
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST0]]
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
// CHECK: %[[SHR:.*]] = llvm.lshr %[[ARG]], %[[CST32]] : i64
// CHECK: %[[TRUNC:.*]] = llvm.trunc %[[SHR]] : i64 to i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<2 x i32>
- // CHECK: llvm.store %[[TRUNC]], %[[GEP]]
- llvm.store %arg, %1 : i64, !llvm.ptr
- // CHECK-NOT: llvm.store %[[ARG]], %[[ALLOCA]]
+ // CHECK: ptr.store %[[TRUNC]], %[[GEP]]
+ ptr.store %arg, %1 : i64, !llvm.ptr
+ // CHECK-NOT: ptr.store %[[ARG]], %[[ALLOCA]]
llvm.return
}
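
For quick reference, the test churn above is a mechanical spelling change. A minimal sketch of the post-rename form, reusing the assembly syntax of the tests (illustrative only, not taken from the patch; the function name is a placeholder):

// Sketch: operand order and trailing type syntax are unchanged,
// only the dialect prefix moves from llvm.* to ptr.*.
llvm.func @rename_sketch(%arg: i32) -> i32 {
  %c1 = llvm.mlir.constant(1 : i32) : i32
  %p = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
  // was: llvm.store %arg, %p : i32, !llvm.ptr
  ptr.store %arg, %p : i32, !llvm.ptr
  // was: %v = llvm.load %p : !llvm.ptr -> i32
  %v = ptr.load %p : !llvm.ptr -> i32
  llvm.return %v : i32
}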
diff --git a/mlir/test/Dialect/MemRef/transform-ops.mlir b/mlir/test/Dialect/MemRef/transform-ops.mlir
index 338f6e3533ad90..53bb2c373927a2 100644
--- a/mlir/test/Dialect/MemRef/transform-ops.mlir
+++ b/mlir/test/Dialect/MemRef/transform-ops.mlir
@@ -369,7 +369,7 @@ module attributes {transform.with_named_sequence} {
transform.apply_conversion_patterns.dialect_to_llvm "memref"
} with type_converter {
transform.apply_conversion_patterns.memref.memref_to_llvm_type_converter
- } {legal_dialects = ["func", "llvm"]} : !transform.any_op
+ } {legal_dialects = ["func", "llvm", "ptr"]} : !transform.any_op
transform.yield
}
}
diff --git a/mlir/test/Dialect/OpenACC/invalid.mlir b/mlir/test/Dialect/OpenACC/invalid.mlir
index c18d964b370f2c..0b1edbb4dad531 100644
--- a/mlir/test/Dialect/OpenACC/invalid.mlir
+++ b/mlir/test/Dialect/OpenACC/invalid.mlir
@@ -283,7 +283,7 @@ acc.private.recipe @privatization_i32 : !llvm.ptr init {
%c1 = arith.constant 1 : i32
%c0 = arith.constant 0 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
- llvm.store %c0, %0 : i32, !llvm.ptr
+ ptr.store %c0, %0 : i32, !llvm.ptr
acc.yield %0 : !llvm.ptr
} destroy {
^bb0(%arg0 : f32):
@@ -314,7 +314,7 @@ acc.firstprivate.recipe @privatization_i32 : !llvm.ptr init {
%c1 = arith.constant 1 : i32
%c0 = arith.constant 0 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
- llvm.store %c0, %0 : i32, !llvm.ptr
+ ptr.store %c0, %0 : i32, !llvm.ptr
acc.yield %0 : !llvm.ptr
} copy {
}
@@ -327,7 +327,7 @@ acc.firstprivate.recipe @privatization_i32 : !llvm.ptr init {
%c1 = arith.constant 1 : i32
%c0 = arith.constant 0 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
- llvm.store %c0, %0 : i32, !llvm.ptr
+ ptr.store %c0, %0 : i32, !llvm.ptr
acc.yield %0 : !llvm.ptr
} copy {
^bb0(%arg0 : f32):
@@ -342,7 +342,7 @@ acc.firstprivate.recipe @privatization_i32 : !llvm.ptr init {
%c1 = arith.constant 1 : i32
%c0 = arith.constant 0 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
- llvm.store %c0, %0 : i32, !llvm.ptr
+ ptr.store %c0, %0 : i32, !llvm.ptr
acc.yield %0 : !llvm.ptr
} copy {
^bb0(%arg0 : f32, %arg1 : i32):
@@ -358,7 +358,7 @@ acc.firstprivate.recipe @privatization_i32 : i32 init {
acc.yield %0 : i32
} copy {
^bb0(%arg0 : i32, %arg1 : !llvm.ptr):
- llvm.store %arg0, %arg1 : i32, !llvm.ptr
+ ptr.store %arg0, %arg1 : i32, !llvm.ptr
acc.yield
} destroy {
^bb0(%arg0 : f32):
diff --git a/mlir/test/Dialect/OpenACC/ops.mlir b/mlir/test/Dialect/OpenACC/ops.mlir
index 5a95811685f845..06f95edbf62aa3 100644
--- a/mlir/test/Dialect/OpenACC/ops.mlir
+++ b/mlir/test/Dialect/OpenACC/ops.mlir
@@ -1445,7 +1445,7 @@ acc.private.recipe @privatization_i32 : !llvm.ptr init {
%c1 = arith.constant 1 : i32
%c0 = arith.constant 0 : i32
%0 = llvm.alloca %c1 x i32 : (i32) -> !llvm.ptr
- llvm.store %c0, %0 : i32, !llvm.ptr
+ ptr.store %c0, %0 : i32, !llvm.ptr
acc.yield %0 : !llvm.ptr
}
@@ -1453,7 +1453,7 @@ acc.private.recipe @privatization_i32 : !llvm.ptr init {
// CHECK: %[[C1:.*]] = arith.constant 1 : i32
// CHECK: %[[C0:.*]] = arith.constant 0 : i32
// CHECK: %[[ALLOCA:.*]] = llvm.alloca %[[C1]] x i32 : (i32) -> !llvm.ptr
-// CHECK: llvm.store %[[C0]], %[[ALLOCA]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[C0]], %[[ALLOCA]] : i32, !llvm.ptr
// CHECK: acc.yield %[[ALLOCA]] : !llvm.ptr
// -----
diff --git a/mlir/test/Dialect/OpenMP/canonicalize.mlir b/mlir/test/Dialect/OpenMP/canonicalize.mlir
index de6c931ecc5fd9..8b4848803cc1df 100644
--- a/mlir/test/Dialect/OpenMP/canonicalize.mlir
+++ b/mlir/test/Dialect/OpenMP/canonicalize.mlir
@@ -133,7 +133,7 @@ func.func @constant_hoisting_target(%x : !llvm.ptr) {
omp.target {
^bb0(%arg0: !llvm.ptr):
%c1 = arith.constant 10 : i32
- llvm.store %c1, %arg0 : i32, !llvm.ptr
+ ptr.store %c1, %arg0 : i32, !llvm.ptr
omp.terminator
}
return
diff --git a/mlir/test/Dialect/OpenMP/invalid.mlir b/mlir/test/Dialect/OpenMP/invalid.mlir
index 2b0e86ddd22bbc..d5448881bc8b2d 100644
--- a/mlir/test/Dialect/OpenMP/invalid.mlir
+++ b/mlir/test/Dialect/OpenMP/invalid.mlir
@@ -523,8 +523,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -1349,8 +1349,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> i32
- llvm.atomicrmw add %arg2, %2 monotonic : !llvm.ptr, i32
+ %2 = ptr.load %arg3 : !llvm.ptr -> i32
+ ptr.atomicrmw add %arg2, %2 monotonic : !llvm.ptr, i32
omp.yield
}
diff --git a/mlir/test/Dialect/OpenMP/ops.mlir b/mlir/test/Dialect/OpenMP/ops.mlir
index 3d4f6435572f7f..7912c7e0334520 100644
--- a/mlir/test/Dialect/OpenMP/ops.mlir
+++ b/mlir/test/Dialect/OpenMP/ops.mlir
@@ -586,8 +586,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -1689,16 +1689,16 @@ func.func @omp_threadprivate() {
// CHECK: {{.*}} = omp.threadprivate [[ARG0]] : !llvm.ptr -> !llvm.ptr
%3 = llvm.mlir.addressof @_QFsubEx : !llvm.ptr
%4 = omp.threadprivate %3 : !llvm.ptr -> !llvm.ptr
- llvm.store %0, %4 : i32, !llvm.ptr
+ ptr.store %0, %4 : i32, !llvm.ptr
// CHECK: omp.parallel
// CHECK: {{.*}} = omp.threadprivate [[ARG0]] : !llvm.ptr -> !llvm.ptr
omp.parallel {
%5 = omp.threadprivate %3 : !llvm.ptr -> !llvm.ptr
- llvm.store %1, %5 : i32, !llvm.ptr
+ ptr.store %1, %5 : i32, !llvm.ptr
omp.terminator
}
- llvm.store %2, %4 : i32, !llvm.ptr
+ ptr.store %2, %4 : i32, !llvm.ptr
return
}
@@ -1995,14 +1995,14 @@ func.func @omp_requires_multiple() -> ()
// CHECK-SAME: (%[[v:.*]]: !llvm.ptr, %[[x:.*]]: !llvm.ptr)
func.func @opaque_pointers_atomic_rwu(%v: !llvm.ptr, %x: !llvm.ptr) {
// CHECK: omp.atomic.read %[[v]] = %[[x]] : !llvm.ptr, i32
- // CHECK: %[[VAL:.*]] = llvm.load %[[x]] : !llvm.ptr -> i32
+ // CHECK: %[[VAL:.*]] = ptr.load %[[x]] : !llvm.ptr -> i32
// CHECK: omp.atomic.write %[[v]] = %[[VAL]] : !llvm.ptr, i32
// CHECK: omp.atomic.update %[[x]] : !llvm.ptr {
// CHECK-NEXT: ^{{[[:alnum:]]+}}(%[[XVAL:.*]]: i32):
// CHECK-NEXT: omp.yield(%[[XVAL]] : i32)
// CHECK-NEXT: }
omp.atomic.read %v = %x : !llvm.ptr, i32
- %val = llvm.load %x : !llvm.ptr -> i32
+ %val = ptr.load %x : !llvm.ptr -> i32
omp.atomic.write %v = %val : !llvm.ptr, i32
omp.atomic.update %x : !llvm.ptr {
^bb0(%xval: i32):
@@ -2027,8 +2027,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
diff --git a/mlir/test/Target/LLVMIR/arm-sve.mlir b/mlir/test/Target/LLVMIR/arm-sve.mlir
index b63d3f06515690..4e77dfcead9aae 100644
--- a/mlir/test/Target/LLVMIR/arm-sve.mlir
+++ b/mlir/test/Target/LLVMIR/arm-sve.mlir
@@ -249,7 +249,7 @@ llvm.func @memcopy(%arg0: !llvm.ptr, %arg1: !llvm.ptr,
// CHECK: getelementptr float, ptr
%19 = llvm.getelementptr %18[%16] : (!llvm.ptr, i64) -> !llvm.ptr, f32
// CHECK: load <vscale x 4 x float>, ptr
- %21 = llvm.load %19 : !llvm.ptr -> vector<[4]xf32>
+ %21 = ptr.load %19 : !llvm.ptr -> vector<[4]xf32>
// CHECK: extractvalue { ptr, ptr, i64, [1 x i64], [1 x i64] }
%22 = llvm.extractvalue %11[1] : !llvm.struct<(ptr, ptr, i64,
array<1 x i64>,
@@ -257,7 +257,7 @@ llvm.func @memcopy(%arg0: !llvm.ptr, %arg1: !llvm.ptr,
// CHECK: getelementptr float, ptr
%23 = llvm.getelementptr %22[%16] : (!llvm.ptr, i64) -> !llvm.ptr, f32
// CHECK: store <vscale x 4 x float> %{{[0-9]+}}, ptr %{{[0-9]+}}
- llvm.store %21, %23 : vector<[4]xf32>, !llvm.ptr
+ ptr.store %21, %23 : vector<[4]xf32>, !llvm.ptr
%25 = llvm.add %16, %15 : i64
llvm.br ^bb1(%25 : i64)
^bb3:
diff --git a/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir b/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir
index 4434aea4ec965f..ed92c800f4d832 100644
--- a/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir
+++ b/mlir/test/Target/LLVMIR/attribute-alias-scopes.mlir
@@ -13,11 +13,11 @@ llvm.func @alias_scopes(%arg1 : !llvm.ptr) {
// CHECK: call void @llvm.experimental.noalias.scope.decl(metadata ![[SCOPES1:[0-9]+]])
llvm.intr.experimental.noalias.scope.decl #alias_scope1
// CHECK: store {{.*}}, !alias.scope ![[SCOPES1]], !noalias ![[SCOPES23:[0-9]+]]
- llvm.store %0, %arg1 {alias_scopes = [#alias_scope1], noalias_scopes = [#alias_scope2, #alias_scope3]} : i32, !llvm.ptr
+ ptr.store %0, %arg1 {alias_scopes = [#alias_scope1], noalias_scopes = [#alias_scope2, #alias_scope3]} : i32, !llvm.ptr
// CHECK: load {{.*}}, !alias.scope ![[SCOPES2:[0-9]+]], !noalias ![[SCOPES13:[0-9]+]]
- %1 = llvm.load %arg1 {alias_scopes = [#alias_scope2], noalias_scopes = [#alias_scope1, #alias_scope3]} : !llvm.ptr -> i32
+ %1 = ptr.load %arg1 {alias_scopes = [#alias_scope2], noalias_scopes = [#alias_scope1, #alias_scope3]} : !llvm.ptr -> i32
// CHECK: atomicrmw {{.*}}, !alias.scope ![[SCOPES3:[0-9]+]], !noalias ![[SCOPES12:[0-9]+]]
- %2 = llvm.atomicrmw add %arg1, %0 monotonic {alias_scopes = [#alias_scope3], noalias_scopes = [#alias_scope1, #alias_scope2]} : !llvm.ptr, i32
+ %2 = ptr.atomicrmw add %arg1, %0 monotonic {alias_scopes = [#alias_scope3], noalias_scopes = [#alias_scope1, #alias_scope2]} : !llvm.ptr, i32
// CHECK: cmpxchg {{.*}}, !alias.scope ![[SCOPES3]]
%3 = llvm.cmpxchg %arg1, %1, %2 acq_rel monotonic {alias_scopes = [#alias_scope3]} : !llvm.ptr, i32
%5 = llvm.mlir.constant(42 : i8) : i8
diff --git a/mlir/test/Target/LLVMIR/attribute-tbaa.mlir b/mlir/test/Target/LLVMIR/attribute-tbaa.mlir
index 69b666a1ec3538..4805d06aaab7b3 100644
--- a/mlir/test/Target/LLVMIR/attribute-tbaa.mlir
+++ b/mlir/test/Target/LLVMIR/attribute-tbaa.mlir
@@ -14,11 +14,11 @@ llvm.func @tbaa2(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
%1 = llvm.mlir.constant(1 : i32) : i32
%2 = llvm.getelementptr inbounds %arg1[%0, 1] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg2_t", (i64, i64)>
// CHECK: load i64, ptr %{{.*}},{{.*}}!tbaa ![[LTAG:[0-9]*]]
- %3 = llvm.load %2 {tbaa = [#tbaa_tag_4]} : !llvm.ptr -> i64
+ %3 = ptr.load %2 {tbaa = [#tbaa_tag_4]} : !llvm.ptr -> i64
%4 = llvm.trunc %3 : i64 to i32
%5 = llvm.getelementptr inbounds %arg0[%0, 0] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg1_t", (i32, i32)>
// CHECK: store i32 %{{.*}}, ptr %{{.*}},{{.*}}!tbaa ![[STAG:[0-9]*]]
- llvm.store %4, %5 {tbaa = [#tbaa_tag_7]} : i32, !llvm.ptr
+ ptr.store %4, %5 {tbaa = [#tbaa_tag_7]} : i32, !llvm.ptr
llvm.return
}
@@ -58,13 +58,13 @@ llvm.func @tbaa2(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
%1 = llvm.mlir.constant(1 : i32) : i32
%2 = llvm.getelementptr inbounds %arg1[%0, 0] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg2_t", (f32, f32)>
// CHECK: load float, ptr %{{.*}},{{.*}}!tbaa ![[LTAG:[0-9]*]]
- %3 = llvm.load %2 {tbaa = [#tbaa_tag_4]} : !llvm.ptr -> f32
+ %3 = ptr.load %2 {tbaa = [#tbaa_tag_4]} : !llvm.ptr -> f32
%4 = llvm.fptosi %3 : f32 to i32
%5 = llvm.getelementptr inbounds %arg0[%0, 0] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<"struct.agg1_t", (i32, i32)>
// CHECK: store i32 %{{.*}}, ptr %{{.*}},{{.*}}!tbaa ![[STAG:[0-9]*]]
- llvm.store %4, %5 {tbaa = [#tbaa_tag_7]} : i32, !llvm.ptr
+ ptr.store %4, %5 {tbaa = [#tbaa_tag_7]} : i32, !llvm.ptr
// CHECK: atomicrmw add ptr %{{.*}}, i32 %{{.*}} !tbaa ![[STAG]]
- %6 = llvm.atomicrmw add %5, %4 monotonic {tbaa = [#tbaa_tag_7]} : !llvm.ptr, i32
+ %6 = ptr.atomicrmw add %5, %4 monotonic {tbaa = [#tbaa_tag_7]} : !llvm.ptr, i32
// CHECK: cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} !tbaa ![[STAG]]
%7 = llvm.cmpxchg %5, %6, %4 acq_rel monotonic {tbaa = [#tbaa_tag_7]} : !llvm.ptr, i32
%9 = llvm.mlir.constant(42 : i8) : i8
diff --git a/mlir/test/Target/LLVMIR/llvmir.mlir b/mlir/test/Target/LLVMIR/llvmir.mlir
index 13e61b6ce10b2a..10121442b2d2f8 100644
--- a/mlir/test/Target/LLVMIR/llvmir.mlir
+++ b/mlir/test/Target/LLVMIR/llvmir.mlir
@@ -198,14 +198,14 @@ llvm.func @global_refs() {
// Check load from globals.
// CHECK: load i32, ptr @i32_global
%0 = llvm.mlir.addressof @i32_global : !llvm.ptr
- %1 = llvm.load %0 : !llvm.ptr -> i32
+ %1 = ptr.load %0 : !llvm.ptr -> i32
// Check the contracted form of load from array constants.
// CHECK: load i8, ptr @string_const
%2 = llvm.mlir.addressof @string_const : !llvm.ptr
%c0 = llvm.mlir.constant(0 : index) : i64
%3 = llvm.getelementptr %2[%c0, %c0] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<6 x i8>
- %4 = llvm.load %3 : !llvm.ptr -> i8
+ %4 = ptr.load %3 : !llvm.ptr -> i8
llvm.return
}
@@ -589,7 +589,7 @@ llvm.func @store_load_static() {
%12 = llvm.mlir.constant(10 : index) : i64
%13 = llvm.extractvalue %6[0] : !llvm.struct<(ptr)>
%14 = llvm.getelementptr %13[%10] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %7, %14 : f32, !llvm.ptr
+ ptr.store %7, %14 : f32, !llvm.ptr
%15 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
%16 = llvm.add %10, %15 : i64
@@ -614,7 +614,7 @@ llvm.func @store_load_static() {
%21 = llvm.mlir.constant(10 : index) : i64
%22 = llvm.extractvalue %6[0] : !llvm.struct<(ptr)>
%23 = llvm.getelementptr %22[%19] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- %24 = llvm.load %23 : !llvm.ptr -> f32
+ %24 = ptr.load %23 : !llvm.ptr -> f32
%25 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
%26 = llvm.add %19, %25 : i64
@@ -657,7 +657,7 @@ llvm.func @store_load_dynamic(%arg0: i64) {
%11 = llvm.extractvalue %6[1] : !llvm.struct<(ptr, i64)>
%12 = llvm.extractvalue %6[0] : !llvm.struct<(ptr, i64)>
%13 = llvm.getelementptr %12[%9] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %7, %13 : f32, !llvm.ptr
+ ptr.store %7, %13 : f32, !llvm.ptr
%14 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
%15 = llvm.add %9, %14 : i64
@@ -682,7 +682,7 @@ llvm.func @store_load_dynamic(%arg0: i64) {
%19 = llvm.extractvalue %6[1] : !llvm.struct<(ptr, i64)>
%20 = llvm.extractvalue %6[0] : !llvm.struct<(ptr, i64)>
%21 = llvm.getelementptr %20[%17] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- %22 = llvm.load %21 : !llvm.ptr -> f32
+ %22 = ptr.load %21 : !llvm.ptr -> f32
%23 = llvm.mlir.constant(1 : index) : i64
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, 1
%24 = llvm.add %17, %23 : i64
@@ -747,7 +747,7 @@ llvm.func @store_load_mixed(%arg0: i64) {
%28 = llvm.add %27, %17 : i64
%29 = llvm.extractvalue %13[0] : !llvm.struct<(ptr, i64, i64)>
%30 = llvm.getelementptr %29[%28] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %18, %30 : f32, !llvm.ptr
+ ptr.store %18, %30 : f32, !llvm.ptr
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64, i64 } %{{[0-9]+}}, 2
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, %{{[0-9]+}}
@@ -771,7 +771,7 @@ llvm.func @store_load_mixed(%arg0: i64) {
%40 = llvm.add %39, %14 : i64
%41 = llvm.extractvalue %13[0] : !llvm.struct<(ptr, i64, i64)>
%42 = llvm.getelementptr %41[%40] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- %43 = llvm.load %42 : !llvm.ptr -> f32
+ %43 = ptr.load %42 : !llvm.ptr -> f32
// CHECK-NEXT: ret void
llvm.return
}
@@ -788,7 +788,7 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr)>, %arg1: !llvm.struct<(ptr
%3 = llvm.mlir.constant(10 : index) : i64
%4 = llvm.extractvalue %arg0[0] : !llvm.struct<(ptr)>
%5 = llvm.getelementptr %4[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %2, %5 : f32, !llvm.ptr
+ ptr.store %2, %5 : f32, !llvm.ptr
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 0
// CHECK-NEXT: %{{[0-9]+}} = getelementptr float, ptr %{{[0-9]+}}, i64 7
@@ -796,7 +796,7 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr)>, %arg1: !llvm.struct<(ptr
%6 = llvm.extractvalue %arg1[1] : !llvm.struct<(ptr, i64)>
%7 = llvm.extractvalue %arg1[0] : !llvm.struct<(ptr, i64)>
%8 = llvm.getelementptr %7[%0] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %2, %8 : f32, !llvm.ptr
+ ptr.store %2, %8 : f32, !llvm.ptr
// CHECK-NEXT: %{{[0-9]+}} = extractvalue { ptr, i64 } %{{[0-9]+}}, 1
// CHECK-NEXT: %{{[0-9]+}} = mul i64 7, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = add i64 %{{[0-9]+}}, %{{[0-9]+}}
@@ -809,7 +809,7 @@ llvm.func @memref_args_rets(%arg0: !llvm.struct<(ptr)>, %arg1: !llvm.struct<(ptr
%12 = llvm.add %11, %1 : i64
%13 = llvm.extractvalue %arg2[0] : !llvm.struct<(ptr, i64)>
%14 = llvm.getelementptr %13[%12] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %2, %14 : f32, !llvm.ptr
+ ptr.store %2, %14 : f32, !llvm.ptr
// CHECK-NEXT: %{{[0-9]+}} = mul i64 10, %{{[0-9]+}}
// CHECK-NEXT: %{{[0-9]+}} = mul i64 %{{[0-9]+}}, 4
// CHECK-NEXT: %{{[0-9]+}} = call ptr @malloc(i64 %{{[0-9]+}})
@@ -901,7 +901,7 @@ llvm.func @multireturn_caller() {
%18 = llvm.add %17, %8 : i64
%19 = llvm.extractvalue %3[0] : !llvm.struct<(ptr, i64, i64)>
%20 = llvm.getelementptr %19[%18] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- %21 = llvm.load %20 : !llvm.ptr -> f32
+ %21 = ptr.load %20 : !llvm.ptr -> f32
llvm.return
}
@@ -1246,8 +1246,8 @@ llvm.func @indirect_varargs_call(%arg0 : !llvm.ptr, %arg1 : i32) {
llvm.func @intpointerconversion(%arg0 : i32) -> i32 {
// CHECK: %2 = inttoptr i32 %0 to ptr
// CHECK-NEXT: %3 = ptrtoint ptr %2 to i32
- %1 = llvm.inttoptr %arg0 : i32 to !llvm.ptr
- %2 = llvm.ptrtoint %1 : !llvm.ptr to i32
+ %1 = ptr.inttoptr %arg0 : i32 to !llvm.ptr
+ %2 = ptr.ptrtoint %1 : !llvm.ptr to i32
llvm.return %2 : i32
}
@@ -1266,7 +1266,7 @@ llvm.func @fpconversion(%arg0 : i32) -> i32 {
// CHECK-LABEL: @addrspace
llvm.func @addrspace(%arg0 : !llvm.ptr) -> !llvm.ptr<2> {
// CHECK: %2 = addrspacecast ptr %0 to ptr addrspace(2)
- %1 = llvm.addrspacecast %arg0 : !llvm.ptr to !llvm.ptr<2>
+ %1 = ptr.addrspacecast %arg0 : !llvm.ptr to !llvm.ptr<2>
llvm.return %1 : !llvm.ptr<2>
}
@@ -1468,44 +1468,44 @@ llvm.func @atomicrmw(
%f32_ptr : !llvm.ptr, %f32 : f32,
%i32_ptr : !llvm.ptr, %i32 : i32) {
// CHECK: atomicrmw fadd ptr %{{.*}}, float %{{.*}} monotonic
- %0 = llvm.atomicrmw fadd %f32_ptr, %f32 monotonic : !llvm.ptr, f32
+ %0 = ptr.atomicrmw fadd %f32_ptr, %f32 monotonic : !llvm.ptr, f32
// CHECK: atomicrmw fsub ptr %{{.*}}, float %{{.*}} monotonic
- %1 = llvm.atomicrmw fsub %f32_ptr, %f32 monotonic : !llvm.ptr, f32
+ %1 = ptr.atomicrmw fsub %f32_ptr, %f32 monotonic : !llvm.ptr, f32
// CHECK: atomicrmw fmax ptr %{{.*}}, float %{{.*}} monotonic
- %2 = llvm.atomicrmw fmax %f32_ptr, %f32 monotonic : !llvm.ptr, f32
+ %2 = ptr.atomicrmw fmax %f32_ptr, %f32 monotonic : !llvm.ptr, f32
// CHECK: atomicrmw fmin ptr %{{.*}}, float %{{.*}} monotonic
- %3 = llvm.atomicrmw fmin %f32_ptr, %f32 monotonic : !llvm.ptr, f32
+ %3 = ptr.atomicrmw fmin %f32_ptr, %f32 monotonic : !llvm.ptr, f32
// CHECK: atomicrmw xchg ptr %{{.*}}, float %{{.*}} monotonic
- %4 = llvm.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.ptr, f32
+ %4 = ptr.atomicrmw xchg %f32_ptr, %f32 monotonic : !llvm.ptr, f32
// CHECK: atomicrmw add ptr %{{.*}}, i32 %{{.*}} acquire
- %5 = llvm.atomicrmw add %i32_ptr, %i32 acquire : !llvm.ptr, i32
+ %5 = ptr.atomicrmw add %i32_ptr, %i32 acquire : !llvm.ptr, i32
// CHECK: atomicrmw sub ptr %{{.*}}, i32 %{{.*}} release
- %6 = llvm.atomicrmw sub %i32_ptr, %i32 release : !llvm.ptr, i32
+ %6 = ptr.atomicrmw sub %i32_ptr, %i32 release : !llvm.ptr, i32
// CHECK: atomicrmw and ptr %{{.*}}, i32 %{{.*}} acq_rel
- %7 = llvm.atomicrmw _and %i32_ptr, %i32 acq_rel : !llvm.ptr, i32
+ %7 = ptr.atomicrmw _and %i32_ptr, %i32 acq_rel : !llvm.ptr, i32
// CHECK: atomicrmw nand ptr %{{.*}}, i32 %{{.*}} seq_cst
- %8 = llvm.atomicrmw nand %i32_ptr, %i32 seq_cst : !llvm.ptr, i32
+ %8 = ptr.atomicrmw nand %i32_ptr, %i32 seq_cst : !llvm.ptr, i32
// CHECK: atomicrmw or ptr %{{.*}}, i32 %{{.*}} monotonic
- %9 = llvm.atomicrmw _or %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %9 = ptr.atomicrmw _or %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw xor ptr %{{.*}}, i32 %{{.*}} monotonic
- %10 = llvm.atomicrmw _xor %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %10 = ptr.atomicrmw _xor %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw max ptr %{{.*}}, i32 %{{.*}} monotonic
- %11 = llvm.atomicrmw max %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %11 = ptr.atomicrmw max %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw min ptr %{{.*}}, i32 %{{.*}} monotonic
- %12 = llvm.atomicrmw min %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %12 = ptr.atomicrmw min %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw umax ptr %{{.*}}, i32 %{{.*}} monotonic
- %13 = llvm.atomicrmw umax %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %13 = ptr.atomicrmw umax %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw umin ptr %{{.*}}, i32 %{{.*}} monotonic
- %14 = llvm.atomicrmw umin %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %14 = ptr.atomicrmw umin %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw uinc_wrap ptr %{{.*}}, i32 %{{.*}} monotonic
- %15 = llvm.atomicrmw uinc_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %15 = ptr.atomicrmw uinc_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw udec_wrap ptr %{{.*}}, i32 %{{.*}} monotonic
- %16 = llvm.atomicrmw udec_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
+ %16 = ptr.atomicrmw udec_wrap %i32_ptr, %i32 monotonic : !llvm.ptr, i32
// CHECK: atomicrmw volatile
// CHECK-SAME: syncscope("singlethread")
// CHECK-SAME: align 8
- %17 = llvm.atomicrmw volatile udec_wrap %i32_ptr, %i32 syncscope("singlethread") monotonic {alignment = 8 : i64} : !llvm.ptr, i32
+ %17 = ptr.atomicrmw volatile udec_wrap %i32_ptr, %i32 syncscope("singlethread") monotonic {alignment = 8 : i64} : !llvm.ptr, i32
llvm.return
}
@@ -1593,7 +1593,7 @@ llvm.func @invoke_result(%arg0 : !llvm.ptr) attributes { personality = @__gxx_pe
// CHECK-NEXT: store i8 %[[a1]], ptr %[[a0]]
// CHECK-NEXT: ret void
^bb1:
- llvm.store %0, %arg0 : i8, !llvm.ptr
+ ptr.store %0, %arg0 : i8, !llvm.ptr
llvm.return
// CHECK: [[unwind]]:
@@ -1887,9 +1887,9 @@ llvm.func @volatile_store_and_load() {
%size = llvm.mlir.constant(1 : i64) : i64
%0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr)
// CHECK: store volatile i32 5, ptr %{{.*}}
- llvm.store volatile %val, %0 : i32, !llvm.ptr
+ ptr.store volatile %val, %0 : i32, !llvm.ptr
// CHECK: %{{.*}} = load volatile i32, ptr %{{.*}}
- %1 = llvm.load volatile %0: !llvm.ptr -> i32
+ %1 = ptr.load volatile %0: !llvm.ptr -> i32
llvm.return
}
@@ -1901,9 +1901,9 @@ llvm.func @nontemporal_store_and_load() {
%size = llvm.mlir.constant(1 : i64) : i64
%0 = llvm.alloca %size x i32 : (i64) -> (!llvm.ptr)
// CHECK: !nontemporal ![[NODE:[0-9]+]]
- llvm.store %val, %0 {nontemporal} : i32, !llvm.ptr
+ ptr.store %val, %0 {nontemporal} : i32, !llvm.ptr
// CHECK: !nontemporal ![[NODE]]
- %1 = llvm.load %0 {nontemporal} : !llvm.ptr -> i32
+ %1 = ptr.load %0 {nontemporal} : !llvm.ptr -> i32
llvm.return
}
@@ -1914,17 +1914,17 @@ llvm.func @nontemporal_store_and_load() {
llvm.func @atomic_store_and_load(%ptr : !llvm.ptr) {
// CHECK: load atomic
// CHECK-SAME: acquire, align 4
- %1 = llvm.load %ptr atomic acquire {alignment = 4 : i64} : !llvm.ptr -> f32
+ %1 = ptr.load %ptr atomic acquire {alignment = 4 : i64} : !llvm.ptr -> f32
// CHECK: load atomic
// CHECK-SAME: syncscope("singlethread") acquire, align 4
- %2 = llvm.load %ptr atomic syncscope("singlethread") acquire {alignment = 4 : i64} : !llvm.ptr -> f32
+ %2 = ptr.load %ptr atomic syncscope("singlethread") acquire {alignment = 4 : i64} : !llvm.ptr -> f32
// CHECK: store atomic
// CHECK-SAME: release, align 4
- llvm.store %1, %ptr atomic release {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %1, %ptr atomic release {alignment = 4 : i64} : f32, !llvm.ptr
// CHECK: store atomic
// CHECK-SAME: syncscope("singlethread") release, align 4
- llvm.store %2, %ptr atomic syncscope("singlethread") release {alignment = 4 : i64} : f32, !llvm.ptr
+ ptr.store %2, %ptr atomic syncscope("singlethread") release {alignment = 4 : i64} : f32, !llvm.ptr
llvm.return
}
@@ -2338,7 +2338,7 @@ llvm.func @zeroinit_complex_local_aggregate() {
// CHECK: store [1000 x { i32, [3 x { double, <4 x ptr>, [2 x ptr] }], [6 x ptr] }] zeroinitializer, ptr %[[#VAR]], align 32
%2 = llvm.mlir.zero : !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, !llvm.vec<4 x ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>>
- llvm.store %2, %1 : !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, !llvm.vec<4 x ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>>, !llvm.ptr
+ ptr.store %2, %1 : !llvm.array<1000 x !llvm.struct<(i32, !llvm.array<3 x !llvm.struct<(f64, !llvm.vec<4 x ptr>, !llvm.array<2 x ptr>)>>, !llvm.array<6 x ptr>)>>, !llvm.ptr
llvm.return
}
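
The llvmir.mlir hunks above also cover the pointer casts and atomics that move to the new dialect. A condensed sketch mirroring those forms (illustrative, not from the patch); note that atomicrmw keeps the underscore-prefixed _and/_or/_xor keyword spellings, and that orderings, syncscopes, alignment, and unit attributes carry over as-is:

llvm.func @ptr_ops_sketch(%p: !llvm.ptr, %i: i32) {
  // Casts: llvm.ptrtoint/inttoptr/addrspacecast become ptr.* ops.
  %0 = ptr.ptrtoint %p : !llvm.ptr to i64
  %1 = ptr.inttoptr %i : i32 to !llvm.ptr
  %2 = ptr.addrspacecast %p : !llvm.ptr to !llvm.ptr<2>
  // Atomics keep the LLVM dialect keyword spellings and orderings.
  %3 = ptr.atomicrmw _and %p, %i acq_rel : !llvm.ptr, i32
  %4 = ptr.load %p atomic syncscope("singlethread") acquire {alignment = 4 : i64} : !llvm.ptr -> i32
  // Volatile and nontemporal modifiers ride along unchanged.
  ptr.store volatile %i, %p : i32, !llvm.ptr
  ptr.store %i, %p {nontemporal} : i32, !llvm.ptr
  llvm.return
}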
diff --git a/mlir/test/Target/LLVMIR/loop-metadata.mlir b/mlir/test/Target/LLVMIR/loop-metadata.mlir
index a9aeebfa4d82d0..49206f47b98b73 100644
--- a/mlir/test/Target/LLVMIR/loop-metadata.mlir
+++ b/mlir/test/Target/LLVMIR/loop-metadata.mlir
@@ -254,11 +254,11 @@ llvm.func @loopOptions(%arg1 : i32, %arg2 : i32) {
^bb4:
%3 = llvm.add %1, %arg2 : i32
// CHECK: = load i32, ptr %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE:[0-9]+]]
- %5 = llvm.load %4 {access_groups = [#group1, #group2]} : !llvm.ptr -> i32
+ %5 = ptr.load %4 {access_groups = [#group1, #group2]} : !llvm.ptr -> i32
// CHECK: store i32 %{{.*}}, ptr %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE]]
- llvm.store %5, %4 {access_groups = [#group1, #group2]} : i32, !llvm.ptr
+ ptr.store %5, %4 {access_groups = [#group1, #group2]} : i32, !llvm.ptr
// CHECK: = atomicrmw add ptr %{{.*}}, i32 %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE]]
- %6 = llvm.atomicrmw add %4, %5 monotonic {access_groups = [#group1, #group2]} : !llvm.ptr, i32
+ %6 = ptr.atomicrmw add %4, %5 monotonic {access_groups = [#group1, #group2]} : !llvm.ptr, i32
// CHECK: = cmpxchg ptr %{{.*}}, i32 %{{.*}}, i32 %{{.*}} !llvm.access.group ![[ACCESS_GROUPS_NODE]]
%7 = llvm.cmpxchg %4, %5, %6 acq_rel monotonic {access_groups = [#group1, #group2]} : !llvm.ptr, i32
%9 = llvm.mlir.constant(42 : i8) : i8
diff --git a/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir b/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir
index 307d8a02ce61da..932ddb1cc84bea 100644
--- a/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-array-sectioning-host.mlir
@@ -23,9 +23,9 @@ module attributes {omp.is_target_device = false} {
%9 = llvm.mlir.constant(0 : i64) : i64
%10 = llvm.mlir.constant(1 : i64) : i64
%11 = llvm.getelementptr %arg0[0, %10, %9, %9] : (!llvm.ptr, i64, i64, i64) -> !llvm.ptr, !llvm.array<3 x array<3 x array<3 x i32>>>
- %12 = llvm.load %11 : !llvm.ptr -> i32
+ %12 = ptr.load %11 : !llvm.ptr -> i32
%13 = llvm.getelementptr %arg1[0, %10, %9, %9] : (!llvm.ptr, i64, i64, i64) -> !llvm.ptr, !llvm.array<3 x array<3 x array<3 x i32>>>
- llvm.store %12, %13 : i32, !llvm.ptr
+ ptr.store %12, %13 : i32, !llvm.ptr
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-device.mlir b/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-device.mlir
index 875d04f584ca96..a1a2785e54b0cd 100644
--- a/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-device.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-device.mlir
@@ -8,8 +8,8 @@ module attributes {omp.is_target_device = true} {
%3 = omp.map_info var_ptr(%0 : !llvm.ptr, i32) map_clauses(to) capture(ByCopy) -> !llvm.ptr {name = "i"}
omp.target map_entries(%2 -> %arg0, %3 -> %arg1 : !llvm.ptr, !llvm.ptr) {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
- %4 = llvm.load %arg1 : !llvm.ptr -> i32
- llvm.store %4, %arg0 : i32, !llvm.ptr
+ %4 = ptr.load %arg1 : !llvm.ptr -> i32
+ ptr.store %4, %arg0 : i32, !llvm.ptr
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-host.mlir b/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-host.mlir
index c8fb4e232f06f5..09e88dfb221dc8 100644
--- a/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-host.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-byref-bycopy-generation-host.mlir
@@ -8,8 +8,8 @@ module attributes {omp.is_target_device = false} {
%3 = omp.map_info var_ptr(%0 : !llvm.ptr, i32) map_clauses(to) capture(ByCopy) -> !llvm.ptr {name = "i"}
omp.target map_entries(%2 -> %arg0, %3 -> %arg1 : !llvm.ptr, !llvm.ptr) {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
- %4 = llvm.load %arg1 : !llvm.ptr -> i32
- llvm.store %4, %arg0 : i32, !llvm.ptr
+ %4 = ptr.load %arg1 : !llvm.ptr -> i32
+ ptr.store %4, %arg0 : i32, !llvm.ptr
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/omptarget-constant-indexing-device-region.mlir b/mlir/test/Target/LLVMIR/omptarget-constant-indexing-device-region.mlir
index 5c0ac79271a693..22ffae5eab1317 100644
--- a/mlir/test/Target/LLVMIR/omptarget-constant-indexing-device-region.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-constant-indexing-device-region.mlir
@@ -14,11 +14,11 @@ module attributes {omp.is_target_device = true} {
%7 = llvm.mlir.constant(20 : i32) : i32
%8 = llvm.mlir.constant(0 : i64) : i64
%9 = llvm.getelementptr %arg0[0, %8] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<10 x i32>
- llvm.store %7, %9 : i32, !llvm.ptr
+ ptr.store %7, %9 : i32, !llvm.ptr
%10 = llvm.mlir.constant(10 : i32) : i32
%11 = llvm.mlir.constant(4 : i64) : i64
%12 = llvm.getelementptr %arg0[0, %11] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<10 x i32>
- llvm.store %10, %12 : i32, !llvm.ptr
+ ptr.store %10, %12 : i32, !llvm.ptr
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-device.mlir b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-device.mlir
index cf08761981fb3a..4cca77bfe64d76 100644
--- a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-device.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-device.mlir
@@ -25,7 +25,7 @@ module attributes {omp.is_target_device = true} {
omp.target map_entries(%map -> %arg0 : !llvm.ptr) {
^bb0(%arg0: !llvm.ptr):
%1 = llvm.mlir.constant(1 : i32) : i32
- llvm.store %1, %arg0 : i32, !llvm.ptr
+ ptr.store %1, %arg0 : i32, !llvm.ptr
omp.terminator
}
diff --git a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir
index 2baa20010d0558..5078ef8f54d1f8 100644
--- a/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-declare-target-llvm-host.mlir
@@ -143,7 +143,7 @@ module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu", omp.is_targe
%1 = llvm.mlir.constant(9 : i32) : i32
%2 = llvm.mlir.zero : !llvm.ptr
%3 = llvm.getelementptr %2[1] : (!llvm.ptr) -> !llvm.ptr, i32
- %4 = llvm.ptrtoint %3 : !llvm.ptr to i64
+ %4 = ptr.ptrtoint %3 : !llvm.ptr to i64
%5 = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
%6 = llvm.insertvalue %4, %5[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
%7 = llvm.mlir.constant(20180515 : i32) : i32
diff --git a/mlir/test/Target/LLVMIR/omptarget-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
index b089d47f795df3..3f1429fd726481 100644
--- a/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-llvm.mlir
@@ -6,7 +6,7 @@ llvm.func @_QPopenmp_target_data() {
%2 = omp.map_info var_ptr(%1 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
omp.target_data map_entries(%2 : !llvm.ptr) {
%3 = llvm.mlir.constant(99 : i32) : i32
- llvm.store %3, %1 : i32, !llvm.ptr
+ ptr.store %3, %1 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -51,7 +51,7 @@ llvm.func @_QPopenmp_target_data_region(%0 : !llvm.ptr) {
%9 = llvm.mlir.constant(1 : i64) : i64
%10 = llvm.mlir.constant(0 : i64) : i64
%11 = llvm.getelementptr %0[0, %10] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.array<1024 x i32>
- llvm.store %7, %11 : i32, !llvm.ptr
+ ptr.store %7, %11 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -90,13 +90,13 @@ llvm.func @_QPomp_target_enter_exit(%1 : !llvm.ptr, %3 : !llvm.ptr) {
%6 = llvm.mlir.constant(1 : i64) : i64
%7 = llvm.alloca %6 x i32 {bindc_name = "i", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFomp_target_enter_exitEi"} : (i64) -> !llvm.ptr
%8 = llvm.mlir.constant(5 : i32) : i32
- llvm.store %8, %7 : i32, !llvm.ptr
+ ptr.store %8, %7 : i32, !llvm.ptr
%9 = llvm.mlir.constant(2 : i32) : i32
- llvm.store %9, %5 : i32, !llvm.ptr
- %10 = llvm.load %7 : !llvm.ptr -> i32
+ ptr.store %9, %5 : i32, !llvm.ptr
+ %10 = ptr.load %7 : !llvm.ptr -> i32
%11 = llvm.mlir.constant(10 : i32) : i32
%12 = llvm.icmp "slt" %10, %11 : i32
- %13 = llvm.load %5 : !llvm.ptr -> i32
+ %13 = ptr.load %5 : !llvm.ptr -> i32
%14 = llvm.mlir.constant(1023 : index) : i64
%15 = llvm.mlir.constant(0 : index) : i64
%16 = llvm.mlir.constant(1024 : index) : i64
@@ -110,10 +110,10 @@ llvm.func @_QPomp_target_enter_exit(%1 : !llvm.ptr, %3 : !llvm.ptr) {
%23 = omp.bounds lower_bound(%20 : i64) upper_bound(%19 : i64) extent(%21 : i64) stride(%22 : i64) start_idx(%22 : i64)
%map2 = omp.map_info var_ptr(%3 : !llvm.ptr, !llvm.array<512 x i32>) map_clauses(exit_release_or_enter_alloc) capture(ByRef) bounds(%23) -> !llvm.ptr {name = ""}
omp.target_enter_data if(%12 : i1) device(%13 : i32) map_entries(%map1, %map2 : !llvm.ptr, !llvm.ptr)
- %24 = llvm.load %7 : !llvm.ptr -> i32
+ %24 = ptr.load %7 : !llvm.ptr -> i32
%25 = llvm.mlir.constant(10 : i32) : i32
%26 = llvm.icmp "sgt" %24, %25 : i32
- %27 = llvm.load %5 : !llvm.ptr -> i32
+ %27 = ptr.load %5 : !llvm.ptr -> i32
%28 = llvm.mlir.constant(1023 : index) : i64
%29 = llvm.mlir.constant(0 : index) : i64
%30 = llvm.mlir.constant(1024 : index) : i64
@@ -207,8 +207,8 @@ llvm.func @_QPopenmp_target_use_dev_ptr() {
omp.target_data map_entries(%map1 : !llvm.ptr) use_device_ptr(%a : !llvm.ptr) {
^bb0(%arg0: !llvm.ptr):
%1 = llvm.mlir.constant(10 : i32) : i32
- %2 = llvm.load %arg0 : !llvm.ptr -> !llvm.ptr
- llvm.store %1, %2 : i32, !llvm.ptr
+ %2 = ptr.load %arg0 : !llvm.ptr -> !llvm.ptr
+ ptr.store %1, %2 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -251,8 +251,8 @@ llvm.func @_QPopenmp_target_use_dev_addr() {
omp.target_data map_entries(%map : !llvm.ptr) use_device_addr(%a : !llvm.ptr) {
^bb0(%arg0: !llvm.ptr):
%1 = llvm.mlir.constant(10 : i32) : i32
- %2 = llvm.load %arg0 : !llvm.ptr -> !llvm.ptr
- llvm.store %1, %2 : i32, !llvm.ptr
+ %2 = ptr.load %arg0 : !llvm.ptr -> !llvm.ptr
+ ptr.store %1, %2 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -293,7 +293,7 @@ llvm.func @_QPopenmp_target_use_dev_addr_no_ptr() {
omp.target_data map_entries(%map : !llvm.ptr) use_device_addr(%a : !llvm.ptr) {
^bb0(%arg0: !llvm.ptr):
%1 = llvm.mlir.constant(10 : i32) : i32
- llvm.store %1, %arg0 : i32, !llvm.ptr
+ ptr.store %1, %arg0 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -335,11 +335,11 @@ llvm.func @_QPopenmp_target_use_dev_addr_nomap() {
omp.target_data map_entries(%map : !llvm.ptr) use_device_addr(%a : !llvm.ptr) {
^bb0(%arg0: !llvm.ptr):
%2 = llvm.mlir.constant(10 : i32) : i32
- %3 = llvm.load %arg0 : !llvm.ptr -> !llvm.ptr
- llvm.store %2, %3 : i32, !llvm.ptr
+ %3 = ptr.load %arg0 : !llvm.ptr -> !llvm.ptr
+ ptr.store %2, %3 : i32, !llvm.ptr
%4 = llvm.mlir.constant(20 : i32) : i32
- %5 = llvm.load %b : !llvm.ptr -> !llvm.ptr
- llvm.store %4, %5 : i32, !llvm.ptr
+ %5 = ptr.load %b : !llvm.ptr -> !llvm.ptr
+ ptr.store %4, %5 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -392,11 +392,11 @@ llvm.func @_QPopenmp_target_use_dev_both() {
omp.target_data map_entries(%map, %map1 : !llvm.ptr, !llvm.ptr) use_device_ptr(%a : !llvm.ptr) use_device_addr(%b : !llvm.ptr) {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
%2 = llvm.mlir.constant(10 : i32) : i32
- %3 = llvm.load %arg0 : !llvm.ptr -> !llvm.ptr
- llvm.store %2, %3 : i32, !llvm.ptr
+ %3 = ptr.load %arg0 : !llvm.ptr -> !llvm.ptr
+ ptr.store %2, %3 : i32, !llvm.ptr
%4 = llvm.mlir.constant(20 : i32) : i32
- %5 = llvm.load %arg1 : !llvm.ptr -> !llvm.ptr
- llvm.store %4, %5 : i32, !llvm.ptr
+ %5 = ptr.load %arg1 : !llvm.ptr -> !llvm.ptr
+ ptr.store %4, %5 : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -448,7 +448,7 @@ llvm.func @_QPopenmp_target_data_update() {
%2 = omp.map_info var_ptr(%1 : !llvm.ptr, i32) map_clauses(to) capture(ByRef) -> !llvm.ptr {name = ""}
omp.target_data map_entries(%2 : !llvm.ptr) {
%3 = llvm.mlir.constant(99 : i32) : i32
- llvm.store %3, %1 : i32, !llvm.ptr
+ ptr.store %3, %1 : i32, !llvm.ptr
omp.terminator
}
diff --git a/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir
index a21e6d61a56185..4b9d9a5ee6a7ba 100644
--- a/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-parallel-llvm.mlir
@@ -10,7 +10,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo
^bb0(%arg2: !llvm.ptr):
omp.parallel {
%1 = llvm.mlir.constant(1 : i32) : i32
- llvm.store %1, %arg2 : i32, !llvm.ptr
+ ptr.store %1, %arg2 : i32, !llvm.ptr
omp.terminator
}
omp.terminator
@@ -25,7 +25,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo
%1 = llvm.mlir.constant(156 : i32) : i32
omp.parallel num_threads(%1 : i32) {
%2 = llvm.mlir.constant(1 : i32) : i32
- llvm.store %2, %arg2 : i32, !llvm.ptr
+ ptr.store %2, %arg2 : i32, !llvm.ptr
omp.terminator
}
omp.terminator
@@ -41,11 +41,11 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo
omp.target map_entries(%2 -> %arg1, %3 -> %arg2 : !llvm.ptr, !llvm.ptr) {
^bb0(%arg1: !llvm.ptr, %arg2: !llvm.ptr):
%4 = llvm.mlir.constant(10 : i32) : i32
- %5 = llvm.load %arg2 : !llvm.ptr -> i32
+ %5 = ptr.load %arg2 : !llvm.ptr -> i32
%6 = llvm.mlir.constant(0 : i64) : i32
%7 = llvm.icmp "ne" %5, %6 : i32
omp.parallel if(%7 : i1) {
- llvm.store %4, %arg1 : i32, !llvm.ptr
+ ptr.store %4, %arg1 : i32, !llvm.ptr
omp.terminator
}
omp.terminator
diff --git a/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir b/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir
index 43d0934d3a931e..b98f556d8c6ffe 100644
--- a/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-parallel-wsloop.mlir
@@ -11,7 +11,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo
%loop_step = llvm.mlir.constant(1 : i32) : i32
omp.wsloop for (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) {
%gep = llvm.getelementptr %arg0[0, %loop_cnt] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32>
- llvm.store %loop_cnt, %gep : i32, !llvm.ptr
+ ptr.store %loop_cnt, %gep : i32, !llvm.ptr
omp.yield
}
omp.terminator
diff --git a/mlir/test/Target/LLVMIR/omptarget-region-device-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-region-device-llvm.mlir
index f537eb5c3f572b..b7f056ccd2225b 100644
--- a/mlir/test/Target/LLVMIR/omptarget-region-device-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-region-device-llvm.mlir
@@ -10,17 +10,17 @@ module attributes {omp.is_target_device = true} {
%5 = llvm.alloca %4 x i32 {bindc_name = "b", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFomp_target_regionEb"} : (i64) -> !llvm.ptr
%6 = llvm.mlir.constant(1 : i64) : i64
%7 = llvm.alloca %6 x i32 {bindc_name = "c", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFomp_target_regionEc"} : (i64) -> !llvm.ptr
- llvm.store %1, %3 : i32, !llvm.ptr
- llvm.store %0, %5 : i32, !llvm.ptr
+ ptr.store %1, %3 : i32, !llvm.ptr
+ ptr.store %0, %5 : i32, !llvm.ptr
%map1 = omp.map_info var_ptr(%3 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
%map2 = omp.map_info var_ptr(%5 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
%map3 = omp.map_info var_ptr(%7 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
omp.target map_entries(%map1 -> %arg0, %map2 -> %arg1, %map3 -> %arg2 : !llvm.ptr, !llvm.ptr, !llvm.ptr) {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr):
- %8 = llvm.load %arg0 : !llvm.ptr -> i32
- %9 = llvm.load %arg1 : !llvm.ptr -> i32
+ %8 = ptr.load %arg0 : !llvm.ptr -> i32
+ %9 = ptr.load %arg1 : !llvm.ptr -> i32
%10 = llvm.add %8, %9 : i32
- llvm.store %10, %arg2 : i32, !llvm.ptr
+ ptr.store %10, %arg2 : i32, !llvm.ptr
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/omptarget-region-llvm-target-device.mlir b/mlir/test/Target/LLVMIR/omptarget-region-llvm-target-device.mlir
index 6fa039f522e206..e40b98925492c3 100644
--- a/mlir/test/Target/LLVMIR/omptarget-region-llvm-target-device.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-region-llvm-target-device.mlir
@@ -10,8 +10,8 @@ module attributes {omp.is_target_device = true} {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr):
%2 = llvm.mlir.constant(20 : i32) : i32
%3 = llvm.mlir.constant(10 : i32) : i32
- llvm.store %3, %arg0 : i32, !llvm.ptr
- llvm.store %2, %arg1 : i32, !llvm.ptr
+ ptr.store %3, %arg0 : i32, !llvm.ptr
+ ptr.store %2, %arg1 : i32, !llvm.ptr
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/omptarget-region-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-region-llvm.mlir
index b861dd7a7d315f..c0c58e46957473 100644
--- a/mlir/test/Target/LLVMIR/omptarget-region-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-region-llvm.mlir
@@ -10,17 +10,17 @@ module attributes {omp.is_target_device = false} {
%5 = llvm.alloca %4 x i32 {bindc_name = "b", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFomp_target_regionEb"} : (i64) -> !llvm.ptr
%6 = llvm.mlir.constant(1 : i64) : i64
%7 = llvm.alloca %6 x i32 {bindc_name = "c", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFomp_target_regionEc"} : (i64) -> !llvm.ptr
- llvm.store %1, %3 : i32, !llvm.ptr
- llvm.store %0, %5 : i32, !llvm.ptr
+ ptr.store %1, %3 : i32, !llvm.ptr
+ ptr.store %0, %5 : i32, !llvm.ptr
%map1 = omp.map_info var_ptr(%3 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
%map2 = omp.map_info var_ptr(%5 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
%map3 = omp.map_info var_ptr(%7 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
omp.target map_entries(%map1 -> %arg0, %map2 -> %arg1, %map3 -> %arg2 : !llvm.ptr, !llvm.ptr, !llvm.ptr) {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr):
- %8 = llvm.load %arg0 : !llvm.ptr -> i32
- %9 = llvm.load %arg1 : !llvm.ptr -> i32
+ %8 = ptr.load %arg0 : !llvm.ptr -> i32
+ %9 = ptr.load %arg1 : !llvm.ptr -> i32
%10 = llvm.add %8, %9 : i32
- llvm.store %10, %arg2 : i32, !llvm.ptr
+ ptr.store %10, %arg2 : i32, !llvm.ptr
omp.terminator
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/omptarget-region-parallel-llvm.mlir b/mlir/test/Target/LLVMIR/omptarget-region-parallel-llvm.mlir
index c80ea1f0a47be7..a02666738e6980 100644
--- a/mlir/test/Target/LLVMIR/omptarget-region-parallel-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-region-parallel-llvm.mlir
@@ -10,18 +10,18 @@ module attributes {omp.is_target_device = false} {
%5 = llvm.alloca %4 x i32 {bindc_name = "b", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFomp_target_regionEb"} : (i64) -> !llvm.ptr
%6 = llvm.mlir.constant(1 : i64) : i64
%7 = llvm.alloca %6 x i32 {bindc_name = "c", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFomp_target_regionEc"} : (i64) -> !llvm.ptr
- llvm.store %1, %3 : i32, !llvm.ptr
- llvm.store %0, %5 : i32, !llvm.ptr
+ ptr.store %1, %3 : i32, !llvm.ptr
+ ptr.store %0, %5 : i32, !llvm.ptr
%map1 = omp.map_info var_ptr(%3 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
%map2 = omp.map_info var_ptr(%5 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
%map3 = omp.map_info var_ptr(%7 : !llvm.ptr, i32) map_clauses(tofrom) capture(ByRef) -> !llvm.ptr {name = ""}
omp.target map_entries( %map1 -> %arg0, %map2 -> %arg1, %map3 -> %arg2 : !llvm.ptr, !llvm.ptr, !llvm.ptr) {
^bb0(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr):
omp.parallel {
- %8 = llvm.load %arg0 : !llvm.ptr -> i32
- %9 = llvm.load %arg1 : !llvm.ptr -> i32
+ %8 = ptr.load %arg0 : !llvm.ptr -> i32
+ %9 = ptr.load %arg1 : !llvm.ptr -> i32
%10 = llvm.add %8, %9 : i32
- llvm.store %10, %arg2 : i32, !llvm.ptr
+ ptr.store %10, %arg2 : i32, !llvm.ptr
omp.terminator
}
omp.terminator
diff --git a/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir b/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir
index 220eb85b3483ec..fa078c269fe652 100644
--- a/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir
+++ b/mlir/test/Target/LLVMIR/omptarget-wsloop.mlir
@@ -10,7 +10,7 @@ module attributes {dlti.dl_spec = #dlti.dl_spec<#dlti.dl_entry<"dlti.alloca_memo
%loop_step = llvm.mlir.constant(1 : i32) : i32
omp.wsloop for (%loop_cnt) : i32 = (%loop_lb) to (%loop_ub) inclusive step (%loop_step) {
%gep = llvm.getelementptr %arg0[0, %loop_cnt] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.array<10 x i32>
- llvm.store %loop_cnt, %gep : i32, !llvm.ptr
+ ptr.store %loop_cnt, %gep : i32, !llvm.ptr
omp.yield
}
llvm.return
diff --git a/mlir/test/Target/LLVMIR/opaque-ptr.mlir b/mlir/test/Target/LLVMIR/opaque-ptr.mlir
index c21f9b0542debc..30a1e5e7207668 100644
--- a/mlir/test/Target/LLVMIR/opaque-ptr.mlir
+++ b/mlir/test/Target/LLVMIR/opaque-ptr.mlir
@@ -3,21 +3,21 @@
// CHECK-LABEL: @opaque_ptr_load
llvm.func @opaque_ptr_load(%arg0: !llvm.ptr) -> i32 {
// CHECK: load i32, ptr %{{.*}}
- %0 = llvm.load %arg0 : !llvm.ptr -> i32
+ %0 = ptr.load %arg0 : !llvm.ptr -> i32
llvm.return %0 : i32
}
// CHECK-LABEL: @opaque_ptr_store
llvm.func @opaque_ptr_store(%arg0: i32, %arg1: !llvm.ptr){
// CHECK: store i32 %{{.*}}, ptr %{{.*}}
- llvm.store %arg0, %arg1 : i32, !llvm.ptr
+ ptr.store %arg0, %arg1 : i32, !llvm.ptr
llvm.return
}
// CHECK-LABEL: @opaque_ptr_ptr_store
llvm.func @opaque_ptr_ptr_store(%arg0: !llvm.ptr, %arg1: !llvm.ptr) {
// CHECK: store ptr %{{.*}}, ptr %{{.*}}
- llvm.store %arg0, %arg1 : !llvm.ptr, !llvm.ptr
+ ptr.store %arg0, %arg1 : !llvm.ptr, !llvm.ptr
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/openacc-llvm.mlir b/mlir/test/Target/LLVMIR/openacc-llvm.mlir
index 897311c6e81bea..34fc4c16dce388 100644
--- a/mlir/test/Target/LLVMIR/openacc-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openacc-llvm.mlir
@@ -136,7 +136,7 @@ llvm.func @testdataop(%arg0: !llvm.ptr, %arg1: !llvm.ptr, %arg2: !llvm.ptr) {
%1 = acc.create varPtr(%arg1 : !llvm.ptr) -> !llvm.ptr
acc.data dataOperands(%0, %1 : !llvm.ptr, !llvm.ptr) {
%9 = llvm.mlir.constant(2 : i32) : i32
- llvm.store %9, %arg2 : i32, !llvm.ptr
+ ptr.store %9, %arg2 : i32, !llvm.ptr
acc.terminator
}
acc.copyout accPtr(%0 : !llvm.ptr) to varPtr(%arg0 : !llvm.ptr)
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir b/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir
index 3c6ca1ef0c6bf5..8dfd62853c468c 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm-invalid.mlir
@@ -78,14 +78,14 @@ llvm.func @omp_threadprivate() {
// expected-error @below {{LLVM Translation failed for operation: omp.threadprivate}}
%5 = omp.threadprivate %4 : !llvm.ptr -> !llvm.ptr
- llvm.store %1, %5 : i32, !llvm.ptr
+ ptr.store %1, %5 : i32, !llvm.ptr
omp.parallel {
%6 = omp.threadprivate %4 : !llvm.ptr -> !llvm.ptr
- llvm.store %2, %6 : i32, !llvm.ptr
+ ptr.store %2, %6 : i32, !llvm.ptr
omp.terminator
}
- llvm.store %3, %5 : i32, !llvm.ptr
+ ptr.store %3, %5 : i32, !llvm.ptr
llvm.return
}
diff --git a/mlir/test/Target/LLVMIR/openmp-llvm.mlir b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
index 1c02c0265462c2..381f5535c043be 100644
--- a/mlir/test/Target/LLVMIR/openmp-llvm.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-llvm.mlir
@@ -35,7 +35,7 @@ llvm.func @test_flush_construct(%arg0: !llvm.ptr) {
// CHECK: call void @__kmpc_flush(ptr @{{[0-9]+}}
omp.flush
// CHECK: load i32, ptr
- %2 = llvm.load %1 : !llvm.ptr -> i32
+ %2 = ptr.load %1 : !llvm.ptr -> i32
// CHECK-NEXT: ret void
llvm.return
@@ -307,7 +307,7 @@ llvm.func @wsloop_simple(%arg0: !llvm.ptr) {
// CHECK: call void @__kmpc_for_static_init_{{.*}}(ptr @[[$loc_struct]],
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
%4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
omp.yield
// CHECK: call void @__kmpc_for_static_fini(ptr @[[$loc_struct]],
}) {operandSegmentSizes = array<i32: 1, 1, 1, 0, 0, 0, 0>} : (i64, i64, i64) -> ()
@@ -328,7 +328,7 @@ llvm.func @wsloop_inclusive_1(%arg0: !llvm.ptr) {
^bb0(%arg1: i64):
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
%4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
omp.yield
}) {operandSegmentSizes = array<i32: 1, 1, 1, 0, 0, 0, 0>} : (i64, i64, i64) -> ()
llvm.return
@@ -346,7 +346,7 @@ llvm.func @wsloop_inclusive_2(%arg0: !llvm.ptr) {
^bb0(%arg1: i64):
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
%4 = llvm.getelementptr %arg0[%arg1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
omp.yield
}) {inclusive, operandSegmentSizes = array<i32: 1, 1, 1, 0, 0, 0, 0>} : (i64, i64, i64) -> ()
llvm.return
@@ -444,7 +444,7 @@ llvm.func @body(i32)
llvm.func @test_omp_wsloop_dynamic_chunk_var(%lb : i32, %ub : i32, %step : i32) -> () {
%1 = llvm.mlir.constant(1 : i64) : i64
%chunk_size_alloca = llvm.alloca %1 x i16 {bindc_name = "chunk_size", in_type = i16, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr
- %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i16
+ %chunk_size_var = ptr.load %chunk_size_alloca : !llvm.ptr -> i16
omp.wsloop schedule(dynamic = %chunk_size_var : i16)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
// CHECK: %[[CHUNK_SIZE:.*]] = sext i16 %{{.*}} to i32
@@ -465,7 +465,7 @@ llvm.func @body(i32)
llvm.func @test_omp_wsloop_dynamic_chunk_var2(%lb : i32, %ub : i32, %step : i32) -> () {
%1 = llvm.mlir.constant(1 : i64) : i64
%chunk_size_alloca = llvm.alloca %1 x i64 {bindc_name = "chunk_size", in_type = i64, uniq_name = "_QFsub1Echunk_size"} : (i64) -> !llvm.ptr
- %chunk_size_var = llvm.load %chunk_size_alloca : !llvm.ptr -> i64
+ %chunk_size_var = ptr.load %chunk_size_alloca : !llvm.ptr -> i64
omp.wsloop schedule(dynamic = %chunk_size_var : i64)
for (%iv) : i32 = (%lb) to (%ub) step (%step) {
// CHECK: %[[CHUNK_SIZE:.*]] = trunc i64 %{{.*}} to i32
@@ -626,7 +626,7 @@ llvm.func @simdloop_simple(%lb : i64, %ub : i64, %step : i64, %arg0: !llvm.ptr)
// tested there. Just check that the right metadata is added.
// CHECK: llvm.access.group
%4 = llvm.getelementptr %arg0[%iv] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
omp.yield
}) {operandSegmentSizes = array<i32: 1,1,1,0,0,0>} :
(i64, i64, i64) -> ()
@@ -663,8 +663,8 @@ llvm.func @simdloop_simple_multiple(%lb1 : i64, %ub1 : i64, %step1 : i64, %lb2 :
// CHECK-NEXT: llvm.access.group
%4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
%5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
- llvm.store %3, %5 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -684,8 +684,8 @@ llvm.func @simdloop_simple_multiple_simdlen(%lb1 : i64, %ub1 : i64, %step1 : i64
// CHECK-NEXT: llvm.access.group
%4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
%5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
- llvm.store %3, %5 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -702,8 +702,8 @@ llvm.func @simdloop_simple_multiple_safelen(%lb1 : i64, %ub1 : i64, %step1 : i64
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
%4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
%5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
- llvm.store %3, %5 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -719,8 +719,8 @@ llvm.func @simdloop_simple_multiple_simdlen_safelen(%lb1 : i64, %ub1 : i64, %ste
%3 = llvm.mlir.constant(2.000000e+00 : f32) : f32
%4 = llvm.getelementptr %arg0[%iv1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
%5 = llvm.getelementptr %arg1[%iv2] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %3, %4 : f32, !llvm.ptr
- llvm.store %3, %5 : f32, !llvm.ptr
+ ptr.store %3, %4 : f32, !llvm.ptr
+ ptr.store %3, %5 : f32, !llvm.ptr
omp.yield
}
llvm.return
@@ -737,16 +737,16 @@ llvm.func @simdloop_if(%arg0: !llvm.ptr {fir.bindc_name = "n"}, %arg1: !llvm.ptr
%2 = llvm.mlir.constant(1 : i64) : i64
%3 = llvm.alloca %2 x i32 {bindc_name = "i", in_type = i32, operandSegmentSizes = array<i32: 0, 0>, uniq_name = "_QFtest_simdEi"} : (i64) -> !llvm.ptr
%4 = llvm.mlir.constant(0 : i32) : i32
- %5 = llvm.load %arg0 : !llvm.ptr -> i32
+ %5 = ptr.load %arg0 : !llvm.ptr -> i32
%6 = llvm.mlir.constant(1 : i32) : i32
- %7 = llvm.load %arg0 : !llvm.ptr -> i32
- %8 = llvm.load %arg1 : !llvm.ptr -> i32
+ %7 = ptr.load %arg0 : !llvm.ptr -> i32
+ %8 = ptr.load %arg1 : !llvm.ptr -> i32
%9 = llvm.icmp "sge" %7, %8 : i32
omp.simdloop if(%9) for (%arg2) : i32 = (%4) to (%5) inclusive step (%6) {
// The form of the emitted IR is controlled by OpenMPIRBuilder and
// tested there. Just check that the right metadata is added.
// CHECK: llvm.access.group
- llvm.store %arg2, %1 : i32, !llvm.ptr
+ ptr.store %arg2, %1 : i32, !llvm.ptr
omp.yield
}
llvm.return
@@ -938,7 +938,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_.var{{.*}})
@@ -948,7 +948,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_none) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_none.var{{.*}})
@@ -958,7 +958,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_uncontended) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_uncontended.var{{.*}})
@@ -968,7 +968,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_contended) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_contended.var{{.*}})
@@ -978,7 +978,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_nonspeculative) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_nonspeculative.var{{.*}})
@@ -988,7 +988,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_nonspeculative_uncontended) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_nonspeculative_uncontended.var{{.*}})
@@ -998,7 +998,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_nonspeculative_contended) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_nonspeculative_contended.var{{.*}})
@@ -1008,7 +1008,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_speculative) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_speculative.var{{.*}})
@@ -1018,7 +1018,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_speculative_uncontended) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_speculative_uncontended.var{{.*}})
@@ -1028,7 +1028,7 @@ llvm.func @omp_critical(%x : !llvm.ptr, %xval : i32) -> () {
// CHECK: omp.critical.region
omp.critical(@mutex_speculative_contended) {
// CHECK: store
- llvm.store %xval, %x : i32, !llvm.ptr
+ ptr.store %xval, %x : i32, !llvm.ptr
omp.terminator
}
// CHECK: call void @__kmpc_end_critical({{.*}}critical_user_mutex_speculative_contended.var{{.*}})
@@ -1084,11 +1084,11 @@ llvm.func @collapse_wsloop(
// CHECK: call void @__kmpc_for_static_init_4u
omp.wsloop
for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
- %31 = llvm.load %20 : !llvm.ptr -> i32
+ %31 = ptr.load %20 : !llvm.ptr -> i32
%32 = llvm.add %31, %arg0 : i32
%33 = llvm.add %32, %arg1 : i32
%34 = llvm.add %33, %arg2 : i32
- llvm.store %34, %20 : i32, !llvm.ptr
+ ptr.store %34, %20 : i32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -1145,11 +1145,11 @@ llvm.func @collapse_wsloop_dynamic(
// CHECK: call void @__kmpc_dispatch_init_4u
omp.wsloop schedule(dynamic)
for (%arg0, %arg1, %arg2) : i32 = (%0, %1, %2) to (%3, %4, %5) step (%6, %7, %8) {
- %31 = llvm.load %20 : !llvm.ptr -> i32
+ %31 = ptr.load %20 : !llvm.ptr -> i32
%32 = llvm.add %31, %arg0 : i32
%33 = llvm.add %32, %arg1 : i32
%34 = llvm.add %33, %arg2 : i32
- llvm.store %34, %20 : i32, !llvm.ptr
+ ptr.store %34, %20 : i32, !llvm.ptr
omp.yield
}
omp.terminator
@@ -2064,7 +2064,7 @@ llvm.func @omp_sections(%arg0 : i32, %arg1 : i32, %arg2 : !llvm.ptr) -> () {
%add = llvm.add %arg0, %arg1 : i32
// CHECK: store i32 %{{.*}}, ptr %{{.*}}, align 4
// CHECK: br label %{{.*}}
- llvm.store %add, %arg2 : i32, !llvm.ptr
+ ptr.store %add, %arg2 : i32, !llvm.ptr
omp.terminator
}
omp.terminator
@@ -2117,13 +2117,13 @@ llvm.func @single(%x: i32, %y: i32, %zaddr: !llvm.ptr) {
// CHECK: %[[a:.*]] = sub i32 %[[x]], %[[y]]
%a = llvm.sub %x, %y : i32
// CHECK: store i32 %[[a]], ptr %[[zaddr]]
- llvm.store %a, %zaddr : i32, !llvm.ptr
+ ptr.store %a, %zaddr : i32, !llvm.ptr
// CHECK: call i32 @__kmpc_single
omp.single {
// CHECK: %[[z:.*]] = add i32 %[[x]], %[[y]]
%z = llvm.add %x, %y : i32
// CHECK: store i32 %[[z]], ptr %[[zaddr]]
- llvm.store %z, %zaddr : i32, !llvm.ptr
+ ptr.store %z, %zaddr : i32, !llvm.ptr
// CHECK: call void @__kmpc_end_single
// CHECK: call void @__kmpc_barrier
omp.terminator
@@ -2131,7 +2131,7 @@ llvm.func @single(%x: i32, %y: i32, %zaddr: !llvm.ptr) {
// CHECK: %[[b:.*]] = mul i32 %[[x]], %[[y]]
%b = llvm.mul %x, %y : i32
// CHECK: store i32 %[[b]], ptr %[[zaddr]]
- llvm.store %b, %zaddr : i32, !llvm.ptr
+ ptr.store %b, %zaddr : i32, !llvm.ptr
// CHECK: ret void
llvm.return
}
@@ -2144,13 +2144,13 @@ llvm.func @single_nowait(%x: i32, %y: i32, %zaddr: !llvm.ptr) {
// CHECK: %[[a:.*]] = sub i32 %[[x]], %[[y]]
%a = llvm.sub %x, %y : i32
// CHECK: store i32 %[[a]], ptr %[[zaddr]]
- llvm.store %a, %zaddr : i32, !llvm.ptr
+ ptr.store %a, %zaddr : i32, !llvm.ptr
// CHECK: call i32 @__kmpc_single
omp.single nowait {
// CHECK: %[[z:.*]] = add i32 %[[x]], %[[y]]
%z = llvm.add %x, %y : i32
// CHECK: store i32 %[[z]], ptr %[[zaddr]]
- llvm.store %z, %zaddr : i32, !llvm.ptr
+ ptr.store %z, %zaddr : i32, !llvm.ptr
// CHECK: call void @__kmpc_end_single
// CHECK-NOT: call void @__kmpc_barrier
omp.terminator
@@ -2158,7 +2158,7 @@ llvm.func @single_nowait(%x: i32, %y: i32, %zaddr: !llvm.ptr) {
// CHECK: %[[t:.*]] = mul i32 %[[x]], %[[y]]
%t = llvm.mul %x, %y : i32
// CHECK: store i32 %[[t]], ptr %[[zaddr]]
- llvm.store %t, %zaddr : i32, !llvm.ptr
+ ptr.store %t, %zaddr : i32, !llvm.ptr
// CHECK: ret void
llvm.return
}
@@ -2187,15 +2187,15 @@ llvm.func @omp_threadprivate() {
%3 = llvm.mlir.addressof @_QFsubEx : !llvm.ptr
%4 = omp.threadprivate %3 : !llvm.ptr -> !llvm.ptr
- llvm.store %0, %4 : i32, !llvm.ptr
+ ptr.store %0, %4 : i32, !llvm.ptr
omp.parallel {
%5 = omp.threadprivate %3 : !llvm.ptr -> !llvm.ptr
- llvm.store %1, %5 : i32, !llvm.ptr
+ ptr.store %1, %5 : i32, !llvm.ptr
omp.terminator
}
- llvm.store %2, %4 : i32, !llvm.ptr
+ ptr.store %2, %4 : i32, !llvm.ptr
llvm.return
}
@@ -2214,9 +2214,9 @@ llvm.func @omp_task(%x: i32, %y: i32, %zaddr: !llvm.ptr) {
omp.task {
%n = llvm.mlir.constant(1 : i64) : i64
%valaddr = llvm.alloca %n x i32 : (i64) -> !llvm.ptr
- %val = llvm.load %valaddr : !llvm.ptr -> i32
+ %val = ptr.load %valaddr : !llvm.ptr -> i32
%double = llvm.add %val, %val : i32
- llvm.store %double, %valaddr : i32, !llvm.ptr
+ ptr.store %double, %valaddr : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -2258,9 +2258,9 @@ llvm.func @omp_task_with_deps(%zaddr: !llvm.ptr) {
omp.task depend(taskdependin -> %zaddr : !llvm.ptr) {
%n = llvm.mlir.constant(1 : i64) : i64
%valaddr = llvm.alloca %n x i32 : (i64) -> !llvm.ptr
- %val = llvm.load %valaddr : !llvm.ptr -> i32
+ %val = ptr.load %valaddr : !llvm.ptr -> i32
%double = llvm.add %val, %val : i32
- llvm.store %double, %valaddr : i32, !llvm.ptr
+ ptr.store %double, %valaddr : i32, !llvm.ptr
omp.terminator
}
llvm.return
@@ -2289,7 +2289,7 @@ module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu"} {
// CHECK: %[[diff:.+]] = sub i32 %[[x]], %[[y]]
%diff = llvm.sub %x, %y : i32
// CHECK: store i32 %[[diff]], ptr %2
- llvm.store %diff, %zaddr : i32, !llvm.ptr
+ ptr.store %diff, %zaddr : i32, !llvm.ptr
// CHECK: %[[omp_global_thread_num:.+]] = call i32 @__kmpc_global_thread_num({{.+}})
// CHECK: %[[task_data:.+]] = call ptr @__kmpc_omp_task_alloc
// CHECK-SAME: (ptr @{{.+}}, i32 %[[omp_global_thread_num]], i32 1, i64 40, i64 16,
@@ -2299,13 +2299,13 @@ module attributes {llvm.target_triple = "x86_64-unknown-linux-gnu"} {
// CHECK: call i32 @__kmpc_omp_task(ptr @{{.+}}, i32 %[[omp_global_thread_num]], ptr %[[task_data]])
omp.task {
%z = llvm.add %x, %y : i32
- llvm.store %z, %zaddr : i32, !llvm.ptr
+ ptr.store %z, %zaddr : i32, !llvm.ptr
omp.terminator
}
// CHECK: %[[prod:.+]] = mul i32 %[[x]], %[[y]]
%b = llvm.mul %x, %y : i32
// CHECK: store i32 %[[prod]], ptr %[[zaddr]]
- llvm.store %b, %zaddr : i32, !llvm.ptr
+ ptr.store %b, %zaddr : i32, !llvm.ptr
llvm.return
}
}
@@ -2329,7 +2329,7 @@ llvm.func @par_task_(%arg0: !llvm.ptr {fir.bindc_name = "a"}) {
%0 = llvm.mlir.constant(1 : i32) : i32
omp.task {
omp.parallel {
- llvm.store %0, %arg0 : i32, !llvm.ptr
+ ptr.store %0, %arg0 : i32, !llvm.ptr
omp.terminator
}
omp.terminator
diff --git a/mlir/test/Target/LLVMIR/openmp-nested.mlir b/mlir/test/Target/LLVMIR/openmp-nested.mlir
index e1fdfdd24a3cb0..02e80dd49249c1 100644
--- a/mlir/test/Target/LLVMIR/openmp-nested.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-nested.mlir
@@ -14,12 +14,12 @@ module {
omp.wsloop for (%arg2) : i64 = (%2) to (%1) step (%0) {
omp.parallel {
omp.wsloop for (%arg3) : i64 = (%2) to (%0) step (%0) {
- llvm.store %2, %12 : i64, !llvm.ptr
+ ptr.store %2, %12 : i64, !llvm.ptr
omp.yield
}
omp.terminator
}
- %19 = llvm.load %12 : !llvm.ptr -> i64
+ %19 = ptr.load %12 : !llvm.ptr -> i64
%20 = llvm.trunc %19 : i64 to i32
%5 = llvm.mlir.addressof @str0 : !llvm.ptr
%6 = llvm.getelementptr %5[%4, %4] : (!llvm.ptr, i32, i32) -> !llvm.ptr, !llvm.array<29 x i8>
diff --git a/mlir/test/Target/LLVMIR/openmp-reduction.mlir b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
index 93ab578df9e4e8..12323dacfa2d92 100644
--- a/mlir/test/Target/LLVMIR/openmp-reduction.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-reduction.mlir
@@ -16,8 +16,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -89,8 +89,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -177,8 +177,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -260,8 +260,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -339,8 +339,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -432,8 +432,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> f32
- llvm.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
+ %2 = ptr.load %arg3 : !llvm.ptr -> f32
+ ptr.atomicrmw fadd %arg2, %2 monotonic : !llvm.ptr, f32
omp.yield
}
@@ -499,8 +499,8 @@ combiner {
}
atomic {
^bb2(%arg2: !llvm.ptr, %arg3: !llvm.ptr):
- %2 = llvm.load %arg3 : !llvm.ptr -> i32
- llvm.atomicrmw add %arg2, %2 monotonic : !llvm.ptr, i32
+ %2 = ptr.load %arg3 : !llvm.ptr -> i32
+ ptr.atomicrmw add %arg2, %2 monotonic : !llvm.ptr, i32
omp.yield
}
diff --git a/mlir/test/Target/LLVMIR/openmp-teams.mlir b/mlir/test/Target/LLVMIR/openmp-teams.mlir
index a7e579d9db492e..b96a1a786e1cfa 100644
--- a/mlir/test/Target/LLVMIR/openmp-teams.mlir
+++ b/mlir/test/Target/LLVMIR/openmp-teams.mlir
@@ -81,7 +81,7 @@ llvm.func @bar()
// CHECK: ret void
llvm.func @omp_teams_branching_shared(%condition: i1, %arg0: i32, %arg1: f32, %arg2: !llvm.ptr, %arg3: f128) {
%allocated = llvm.call @my_alloca_fn(): () -> !llvm.ptr
- %loaded = llvm.load %allocated : !llvm.ptr -> i32
+ %loaded = ptr.load %allocated : !llvm.ptr -> i32
llvm.br ^codegenBlock
^codegenBlock:
omp.teams {
diff --git a/mlir/test/Target/LLVMIR/target-ext-type.mlir b/mlir/test/Target/LLVMIR/target-ext-type.mlir
index 6b2d2ea3d4c231..f23acbdd64c33a 100644
--- a/mlir/test/Target/LLVMIR/target-ext-type.mlir
+++ b/mlir/test/Target/LLVMIR/target-ext-type.mlir
@@ -14,7 +14,7 @@ llvm.func @func2() -> !llvm.target<"spirv.Event"> {
%0 = llvm.mlir.constant(1 : i32) : i32
%1 = llvm.mlir.poison : !llvm.target<"spirv.Event">
%2 = llvm.alloca %0 x !llvm.target<"spirv.Event"> {alignment = 8 : i64} : (i32) -> !llvm.ptr
- %3 = llvm.load %2 {alignment = 8 : i64} : !llvm.ptr -> !llvm.target<"spirv.Event">
+ %3 = ptr.load %2 {alignment = 8 : i64} : !llvm.ptr -> !llvm.target<"spirv.Event">
llvm.return %1 : !llvm.target<"spirv.Event">
}
diff --git a/mlir/test/mlir-cpu-runner/simple.mlir b/mlir/test/mlir-cpu-runner/simple.mlir
index 38d9dcaf553714..dabc3196eaae59 100644
--- a/mlir/test/mlir-cpu-runner/simple.mlir
+++ b/mlir/test/mlir-cpu-runner/simple.mlir
@@ -44,9 +44,9 @@ llvm.func @foo() -> f32 {
%1 = llvm.mlir.constant(0 : index) : i64
%2 = llvm.mlir.constant(1.234000e+03 : f32) : f32
%3 = llvm.getelementptr %0[%1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- llvm.store %2, %3 : f32, !llvm.ptr
+ ptr.store %2, %3 : f32, !llvm.ptr
%4 = llvm.getelementptr %0[%1] : (!llvm.ptr, i64) -> !llvm.ptr, f32
- %5 = llvm.load %4 : !llvm.ptr -> f32
+ %5 = ptr.load %4 : !llvm.ptr -> f32
llvm.call @deallocation(%0) : (!llvm.ptr) -> ()
llvm.return %5 : f32
}
diff --git a/mlir/test/mlir-cpu-runner/x86-varargs.mlir b/mlir/test/mlir-cpu-runner/x86-varargs.mlir
index de1b723f461e52..89b4a208095460 100644
--- a/mlir/test/mlir-cpu-runner/x86-varargs.mlir
+++ b/mlir/test/mlir-cpu-runner/x86-varargs.mlir
@@ -39,25 +39,25 @@ llvm.func @foo(%arg0: i32, ...) -> i32 {
%13 = llvm.alloca %12 x !llvm.array<1 x struct<"struct.va_list", (i32, i32, ptr, ptr)>> {alignment = 8 : i64} : (i32) -> !llvm.ptr
llvm.intr.vastart %13 : !llvm.ptr
%15 = llvm.getelementptr %13[%11, %10, 0] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<1 x struct<"struct.va_list", (i32, i32, ptr, ptr)>>
- %16 = llvm.load %15 : !llvm.ptr -> i32
+ %16 = ptr.load %15 : !llvm.ptr -> i32
%17 = llvm.icmp "ult" %16, %8 : i32
llvm.cond_br %17, ^bb1, ^bb2
^bb1: // pred: ^bb0
%18 = llvm.getelementptr %13[%7, %6, 3] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<1 x struct<"struct.va_list", (i32, i32, ptr, ptr)>>
- %19 = llvm.load %18 : !llvm.ptr -> !llvm.ptr
+ %19 = ptr.load %18 : !llvm.ptr -> !llvm.ptr
%20 = llvm.zext %16 : i32 to i64
%21 = llvm.getelementptr %19[%20] : (!llvm.ptr, i64) -> !llvm.ptr, i8
%22 = llvm.add %16, %4 : i32
- llvm.store %22, %15 : i32, !llvm.ptr
+ ptr.store %22, %15 : i32, !llvm.ptr
llvm.br ^bb3(%21 : !llvm.ptr)
^bb2: // pred: ^bb0
%23 = llvm.getelementptr %13[%3, %2, 2] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<1 x struct<"struct.va_list", (i32, i32, ptr, ptr)>>
- %24 = llvm.load %23 : !llvm.ptr -> !llvm.ptr
+ %24 = ptr.load %23 : !llvm.ptr -> !llvm.ptr
%25 = llvm.getelementptr %24[%0] : (!llvm.ptr, i64) -> !llvm.ptr, i8
- llvm.store %25, %23 : !llvm.ptr, !llvm.ptr
+ ptr.store %25, %23 : !llvm.ptr, !llvm.ptr
llvm.br ^bb3(%24 : !llvm.ptr)
^bb3(%26: !llvm.ptr): // 2 preds: ^bb1, ^bb2
- %28 = llvm.load %26 : !llvm.ptr -> i32
+ %28 = ptr.load %26 : !llvm.ptr -> i32
llvm.intr.vaend %13 : !llvm.ptr
llvm.return %28 : i32
}
diff --git a/mlir/unittests/ExecutionEngine/Invoke.cpp b/mlir/unittests/ExecutionEngine/Invoke.cpp
index ff87fc9fad805a..426ead32c51509 100644
--- a/mlir/unittests/ExecutionEngine/Invoke.cpp
+++ b/mlir/unittests/ExecutionEngine/Invoke.cpp
@@ -24,6 +24,7 @@
#include "mlir/Pass/PassManager.h"
#include "mlir/Target/LLVMIR/Dialect/Builtin/BuiltinToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
+#include "mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Export.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/raw_ostream.h"
@@ -71,6 +72,7 @@ TEST(MLIRExecutionEngine, SKIP_WITHOUT_JIT(AddInteger)) {
registerAllDialects(registry);
registerBuiltinDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
MLIRContext context(registry);
OwningOpRef<ModuleOp> module =
parseSourceString<ModuleOp>(moduleStr, &context);
@@ -98,6 +100,7 @@ TEST(MLIRExecutionEngine, SKIP_WITHOUT_JIT(SubtractFloat)) {
registerAllDialects(registry);
registerBuiltinDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
MLIRContext context(registry);
OwningOpRef<ModuleOp> module =
parseSourceString<ModuleOp>(moduleStr, &context);
@@ -130,6 +133,7 @@ TEST(NativeMemRefJit, SKIP_WITHOUT_JIT(ZeroRankMemref)) {
registerAllDialects(registry);
registerBuiltinDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
MLIRContext context(registry);
auto module = parseSourceString<ModuleOp>(moduleStr, &context);
ASSERT_TRUE(!!module);
@@ -166,6 +170,7 @@ TEST(NativeMemRefJit, SKIP_WITHOUT_JIT(RankOneMemref)) {
registerAllDialects(registry);
registerBuiltinDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
MLIRContext context(registry);
auto module = parseSourceString<ModuleOp>(moduleStr, &context);
ASSERT_TRUE(!!module);
@@ -221,6 +226,7 @@ TEST(NativeMemRefJit, SKIP_WITHOUT_JIT(BasicMemref)) {
registerAllDialects(registry);
registerBuiltinDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
MLIRContext context(registry);
OwningOpRef<ModuleOp> module =
parseSourceString<ModuleOp>(moduleStr, &context);
@@ -271,6 +277,7 @@ TEST(NativeMemRefJit, MAYBE_JITCallback) {
registerAllDialects(registry);
registerBuiltinDialectTranslation(registry);
registerLLVMDialectTranslation(registry);
+ registerPtrDialectTranslation(registry);
MLIRContext context(registry);
auto module = parseSourceString<ModuleOp>(moduleStr, &context);
ASSERT_TRUE(!!module);
From 733f50e528d474321d9a5c4785ac83ef19a4b4ca Mon Sep 17 00:00:00 2001
From: Fabian Mora <fmora.dev at gmail.com>
Date: Wed, 3 Jan 2024 17:13:10 +0000
Subject: [PATCH 3/3] Update flang
---
flang/lib/Optimizer/CodeGen/CodeGen.cpp | 33 ++--
flang/lib/Optimizer/Support/CMakeLists.txt | 1 +
flang/lib/Optimizer/Support/InitFIR.cpp | 3 +
.../Fir/convert-to-llvm-openmp-and-fir.fir | 60 ++++----
flang/test/Fir/convert-to-llvm.fir | 144 +++++++++---------
flang/test/Fir/embox-char.fir | 48 +++---
flang/test/Fir/embox-substring.fir | 2 +-
flang/test/Fir/rebox-susbtring.fir | 4 +-
flang/test/Fir/tbaa.fir | 66 ++++----
flang/test/Lower/OpenMP/FIR/flush.f90 | 6 +-
10 files changed, 189 insertions(+), 178 deletions(-)
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index e07732d57880c5..48c157dc1066ac 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -390,10 +390,10 @@ class FIROpConversion : public mlir::ConvertOpToLLVMPattern<FromOp> {
this->getTypeConverter());
}
- void attachTBAATag(mlir::LLVM::AliasAnalysisOpInterface op,
- mlir::Type baseFIRType, mlir::Type accessFIRType,
- mlir::LLVM::GEPOp gep) const {
- lowerTy().attachTBAATag(op, baseFIRType, accessFIRType, gep);
+ void attachTBAATag(mlir::Operation *op, mlir::Type baseFIRType,
+ mlir::Type accessFIRType, mlir::LLVM::GEPOp gep) const {
+ if (auto iface = mlir::dyn_cast<mlir::LLVM::AliasAnalysisOpInterface>(op))
+ lowerTy().attachTBAATag(iface, baseFIRType, accessFIRType, gep);
}
const fir::FIRToLLVMPassOptions &options;
@@ -3105,9 +3105,11 @@ struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
TODO(loc, "loading or assumed rank fir.box");
auto boxValue =
rewriter.create<mlir::LLVM::LoadOp>(loc, llvmLoadTy, inputBoxStorage);
- if (std::optional<mlir::ArrayAttr> optionalTag = load.getTbaa())
- boxValue.setTBAATags(*optionalTag);
- else
+ if (std::optional<mlir::ArrayAttr> optionalTag = load.getTbaa()) {
+ if (auto iface = mlir::dyn_cast<mlir::LLVM::AliasAnalysisOpInterface>(
+ boxValue.getOperation()))
+ iface.setTBAATags(*optionalTag);
+ } else
attachTBAATag(boxValue, boxTy, boxTy, nullptr);
auto newBoxStorage =
genAllocaWithType(loc, llvmLoadTy, defaultAlign, rewriter);
@@ -3118,9 +3120,11 @@ struct LoadOpConversion : public FIROpConversion<fir::LoadOp> {
} else {
auto loadOp = rewriter.create<mlir::LLVM::LoadOp>(
load.getLoc(), llvmLoadTy, adaptor.getOperands(), load->getAttrs());
- if (std::optional<mlir::ArrayAttr> optionalTag = load.getTbaa())
- loadOp.setTBAATags(*optionalTag);
- else
+ if (std::optional<mlir::ArrayAttr> optionalTag = load.getTbaa()) {
+ if (auto iface = mlir::dyn_cast<mlir::LLVM::AliasAnalysisOpInterface>(
+ loadOp.getOperation()))
+ iface.setTBAATags(*optionalTag);
+ } else
attachTBAATag(loadOp, load.getType(), load.getType(), nullptr);
rewriter.replaceOp(load, loadOp.getResult());
}
@@ -3364,9 +3368,11 @@ struct StoreOpConversion : public FIROpConversion<fir::StoreOp> {
newStoreOp = rewriter.create<mlir::LLVM::StoreOp>(
loc, adaptor.getOperands()[0], adaptor.getOperands()[1]);
}
- if (std::optional<mlir::ArrayAttr> optionalTag = store.getTbaa())
- newStoreOp.setTBAATags(*optionalTag);
- else
+ if (std::optional<mlir::ArrayAttr> optionalTag = store.getTbaa()) {
+ if (auto iface = mlir::dyn_cast<mlir::LLVM::AliasAnalysisOpInterface>(
+ newStoreOp.getOperation()))
+ iface.setTBAATags(*optionalTag);
+ } else
attachTBAATag(newStoreOp, storeTy, storeTy, nullptr);
rewriter.eraseOp(store);
return mlir::success();
@@ -3883,6 +3889,7 @@ class FIRToLLVMLowering
mlir::populateComplexToLLVMConversionPatterns(typeConverter, pattern);
mlir::populateVectorToLLVMConversionPatterns(typeConverter, pattern);
mlir::ConversionTarget target{*context};
+ target.addLegalDialect<mlir::ptr::PtrDialect>();
target.addLegalDialect<mlir::LLVM::LLVMDialect>();
// The OpenMP dialect is legal for Operations without regions, for those
// which contains regions it is legal if the region contains only the
diff --git a/flang/lib/Optimizer/Support/CMakeLists.txt b/flang/lib/Optimizer/Support/CMakeLists.txt
index 55f5718a90b854..b98f0988a3d20c 100644
--- a/flang/lib/Optimizer/Support/CMakeLists.txt
+++ b/flang/lib/Optimizer/Support/CMakeLists.txt
@@ -20,6 +20,7 @@ add_flang_library(FIRSupport
MLIROpenACCToLLVMIRTranslation
MLIROpenMPToLLVMIRTranslation
MLIRLLVMToLLVMIRTranslation
+ MLIRPtrToLLVMIRTranslation
MLIRTargetLLVMIRExport
MLIRTargetLLVMIRImport
diff --git a/flang/lib/Optimizer/Support/InitFIR.cpp b/flang/lib/Optimizer/Support/InitFIR.cpp
index 0753c4511d9c64..c9ef8b35bf0307 100644
--- a/flang/lib/Optimizer/Support/InitFIR.cpp
+++ b/flang/lib/Optimizer/Support/InitFIR.cpp
@@ -11,6 +11,7 @@
#include "mlir/Target/LLVMIR/Dialect/LLVMIR/LLVMToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/OpenACC/OpenACCToLLVMIRTranslation.h"
#include "mlir/Target/LLVMIR/Dialect/OpenMP/OpenMPToLLVMIRTranslation.h"
+#include "mlir/Target/LLVMIR/Dialect/Ptr/PtrToLLVMIRTranslation.h"
void fir::support::registerLLVMTranslation(mlir::MLIRContext &context) {
mlir::DialectRegistry registry;
@@ -22,5 +23,7 @@ void fir::support::registerLLVMTranslation(mlir::MLIRContext &context) {
registerLLVMDialectTranslation(registry);
// Register builtin dialect interface.
registerBuiltinDialectTranslation(registry);
+ // Register ptr dialect interface.
+ registerPtrDialectTranslation(registry);
context.appendDialectRegistry(registry);
}
diff --git a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
index 6efa4d0a095869..cb96817f8834b8 100644
--- a/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
+++ b/flang/test/Fir/convert-to-llvm-openmp-and-fir.fir
@@ -29,15 +29,15 @@ func.func @_QPsb1(%arg0: !fir.ref<i32> {fir.bindc_name = "n"}, %arg1: !fir.ref<!
// CHECK: omp.parallel {
// CHECK: %[[ONE_3:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[I_VAR:.*]] = llvm.alloca %[[ONE_3]] x i32 {pinned} : (i64) -> !llvm.ptr
-// CHECK: %[[N:.*]] = llvm.load %[[N_REF]] : !llvm.ptr -> i32
+// CHECK: %[[N:.*]] = ptr.load %[[N_REF]] : !llvm.ptr -> i32
// CHECK: omp.wsloop nowait
// CHECK-SAME: for (%[[I:.*]]) : i32 = (%[[ONE_2]]) to (%[[N]]) inclusive step (%[[ONE_2]]) {
-// CHECK: llvm.store %[[I]], %[[I_VAR]] : i32, !llvm.ptr
-// CHECK: %[[I1:.*]] = llvm.load %[[I_VAR]] : !llvm.ptr -> i32
+// CHECK: ptr.store %[[I]], %[[I_VAR]] : i32, !llvm.ptr
+// CHECK: %[[I1:.*]] = ptr.load %[[I_VAR]] : !llvm.ptr -> i32
// CHECK: %[[I1_EXT:.*]] = llvm.sext %[[I1]] : i32 to i64
// CHECK: %[[I_CSTYLE:.*]] = llvm.sub %[[I1_EXT]], %[[ONE_1]] : i64
// CHECK: %[[ARR_I_REF:.*]] = llvm.getelementptr %[[ARR_REF]][%[[I_CSTYLE]]] : (!llvm.ptr, i64) -> !llvm.ptr
-// CHECK: llvm.store %[[I1]], %[[ARR_I_REF]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[I1]], %[[ARR_I_REF]] : i32, !llvm.ptr
// CHECK: omp.yield
// CHECK: }
// CHECK: omp.terminator
@@ -63,8 +63,8 @@ func.func @_QPsb2(%arg0: !fir.ref<i32> {fir.bindc_name = "x"}, %arg1: !fir.ref<i
// CHECK-SAME: %[[X_REF:.*]]: !llvm.ptr {fir.bindc_name = "x"}, %[[N_REF:.*]]: !llvm.ptr {fir.bindc_name = "n"}) {
// CHECK: omp.parallel {
// CHECK: omp.master {
-// CHECK: %[[N:.*]] = llvm.load %[[N_REF]] : !llvm.ptr -> i32
-// CHECK: llvm.store %[[N]], %[[X_REF]] : i32, !llvm.ptr
+// CHECK: %[[N:.*]] = ptr.load %[[N_REF]] : !llvm.ptr -> i32
+// CHECK: ptr.store %[[N]], %[[X_REF]] : i32, !llvm.ptr
// CHECK: omp.terminator
// CHECK: }
// CHECK: omp.terminator
@@ -99,7 +99,7 @@ func.func @_QPsb(%arr: !fir.box<!fir.array<?xi32>> {fir.bindc_name = "arr"}) {
// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[C50:.*]] = llvm.mlir.constant(50 : i32) : i32
// CHECK: omp.wsloop for (%[[INDX:.*]]) : i32 = (%[[C1]]) to (%[[C50]]) inclusive step (%[[C1]]) {
-// CHECK: llvm.store %[[INDX]], %{{.*}} : i32, !llvm.ptr
+// CHECK: ptr.store %[[INDX]], %{{.*}} : i32, !llvm.ptr
// CHECK: omp.yield
// CHECK: omp.terminator
// CHECK: llvm.return
@@ -201,15 +201,15 @@ func.func @_QPsimd1(%arg0: !fir.ref<i32> {fir.bindc_name = "n"}, %arg1: !fir.ref
// CHECK: omp.parallel {
// CHECK: %[[ONE_3:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[I_VAR:.*]] = llvm.alloca %[[ONE_3]] x i32 {pinned} : (i64) -> !llvm.ptr
-// CHECK: %[[N:.*]] = llvm.load %[[N_REF]] : !llvm.ptr -> i32
+// CHECK: %[[N:.*]] = ptr.load %[[N_REF]] : !llvm.ptr -> i32
// CHECK: omp.simdloop
// CHECK-SAME: (%[[I:.*]]) : i32 = (%[[ONE_2]]) to (%[[N]]) step (%[[ONE_2]]) {
-// CHECK: llvm.store %[[I]], %[[I_VAR]] : i32, !llvm.ptr
-// CHECK: %[[I1:.*]] = llvm.load %[[I_VAR]] : !llvm.ptr -> i32
+// CHECK: ptr.store %[[I]], %[[I_VAR]] : i32, !llvm.ptr
+// CHECK: %[[I1:.*]] = ptr.load %[[I_VAR]] : !llvm.ptr -> i32
// CHECK: %[[I1_EXT:.*]] = llvm.sext %[[I1]] : i32 to i64
// CHECK: %[[I_CSTYLE:.*]] = llvm.sub %[[I1_EXT]], %[[ONE_1]] : i64
// CHECK: %[[ARR_I_REF:.*]] = llvm.getelementptr %[[ARR_REF]][%[[I_CSTYLE]]] : (!llvm.ptr, i64) -> !llvm.ptr
-// CHECK: llvm.store %[[I1]], %[[ARR_I_REF]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[I1]], %[[ARR_I_REF]] : i32, !llvm.ptr
// CHECK: omp.yield
// CHECK: }
// CHECK: omp.terminator
@@ -386,24 +386,24 @@ func.func @_QPopenmp_target_data_region() {
// CHECK: %[[VAL_16:.*]] = llvm.icmp "sgt" %[[VAL_14]], %[[VAL_15]] : i64
// CHECK: llvm.cond_br %[[VAL_16]], ^bb2, ^bb3
// CHECK: ^bb2:
-// CHECK: llvm.store %[[VAL_13]], %[[VAL_3]] : i32, !llvm.ptr
-// CHECK: %[[VAL_17:.*]] = llvm.load %[[VAL_3]] : !llvm.ptr -> i32
-// CHECK: %[[VAL_18:.*]] = llvm.load %[[VAL_3]] : !llvm.ptr -> i32
+// CHECK: ptr.store %[[VAL_13]], %[[VAL_3]] : i32, !llvm.ptr
+// CHECK: %[[VAL_17:.*]] = ptr.load %[[VAL_3]] : !llvm.ptr -> i32
+// CHECK: %[[VAL_18:.*]] = ptr.load %[[VAL_3]] : !llvm.ptr -> i32
// CHECK: %[[VAL_19:.*]] = llvm.sext %[[VAL_18]] : i32 to i64
// CHECK: %[[VAL_20:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[VAL_21:.*]] = llvm.sub %[[VAL_19]], %[[VAL_20]] : i64
// CHECK: %[[VAL_22:.*]] = llvm.getelementptr %[[VAL_1]][0, %[[VAL_21]]] : (!llvm.ptr, i64) -> !llvm.ptr
-// CHECK: llvm.store %[[VAL_17]], %[[VAL_22]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[VAL_17]], %[[VAL_22]] : i32, !llvm.ptr
// CHECK: %[[VAL_23:.*]] = llvm.add %[[VAL_12]], %[[VAL_8]] : i64
// CHECK: %[[VAL_24:.*]] = llvm.trunc %[[VAL_8]] : i64 to i32
-// CHECK: %[[VAL_25:.*]] = llvm.load %[[VAL_3]] : !llvm.ptr -> i32
+// CHECK: %[[VAL_25:.*]] = ptr.load %[[VAL_3]] : !llvm.ptr -> i32
// CHECK: %[[VAL_26:.*]] = llvm.add %[[VAL_25]], %[[VAL_24]] : i32
// CHECK: %[[VAL_27:.*]] = llvm.add %[[VAL_12]], %[[VAL_8]] : i64
// CHECK: %[[VAL_28:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[VAL_29:.*]] = llvm.sub %[[VAL_14]], %[[VAL_28]] : i64
// CHECK: llvm.br ^bb1(%[[VAL_27]], %[[VAL_26]], %[[VAL_29]] : i64, i32, i64)
// CHECK: ^bb3:
-// CHECK: llvm.store %[[VAL_13]], %[[VAL_3]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[VAL_13]], %[[VAL_3]] : i32, !llvm.ptr
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return
@@ -463,7 +463,7 @@ func.func @_QPomp_target() {
// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[VAL_6:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_7:.*]] = llvm.getelementptr %[[ARG_0]][0, %[[VAL_6]]] : (!llvm.ptr, i64) -> !llvm.ptr
-// CHECK: llvm.store %[[VAL_3]], %[[VAL_7]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[VAL_3]], %[[VAL_7]] : i32, !llvm.ptr
// CHECK: omp.terminator
// CHECK: }
// CHECK: llvm.return
@@ -669,9 +669,9 @@ func.func @_QPsb() {
// CHECK: %[[EXIT_COND:.*]] = llvm.icmp "sgt"
// CHECK: llvm.cond_br %[[EXIT_COND]], ^[[BB_LOOP_BODY:.*]], ^[[BB_EXIT:.*]]
// CHECK: ^[[BB_LOOP_BODY]]:
-// CHECK: %[[LI_VAL:.*]] = llvm.load %[[LI_REF]] : !llvm.ptr -> i32
+// CHECK: %[[LI_VAL:.*]] = ptr.load %[[LI_REF]] : !llvm.ptr -> i32
// CHECK: %[[LI_INC:.*]] = llvm.add %[[LI_VAL]], %[[ONE]] : i32
-// CHECK: llvm.store %[[LI_INC]], %[[LI_REF]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[LI_INC]], %[[LI_REF]] : i32, !llvm.ptr
// CHECK: llvm.br ^[[BB_ENTRY]]({{.*}})
// CHECK: ^[[BB_EXIT]]:
// CHECK: omp.terminator
@@ -703,7 +703,7 @@ func.func @_QPsb() {
// CHECK: omp.parallel {
// CHECK: omp.wsloop reduction(@[[EQV_REDUCTION]] -> %[[RED_ACCUMULATOR]] : !llvm.ptr) for
// CHECK: %[[ARRAY_ELEM_REF:.*]] = llvm.getelementptr %[[ARRAY_REF]][0, %{{.*}}] : (!llvm.ptr, i64) -> !llvm.ptr
-// CHECK: %[[ARRAY_ELEM:.*]] = llvm.load %[[ARRAY_ELEM_REF]] : !llvm.ptr -> i32
+// CHECK: %[[ARRAY_ELEM:.*]] = ptr.load %[[ARRAY_ELEM_REF]] : !llvm.ptr -> i32
// CHECK: omp.reduction %[[ARRAY_ELEM]], %[[RED_ACCUMULATOR]] : i32, !llvm.ptr
// CHECK: omp.yield
// CHECK: omp.terminator
@@ -769,10 +769,10 @@ func.func @_QPs(%arg0: !fir.ref<!fir.complex<4>> {fir.bindc_name = "x"}) {
//CHECK: omp.parallel {
//CHECK: %[[CONST_1:.*]] = llvm.mlir.constant(1 : i32) : i32
//CHECK: %[[ALLOCA_1:.*]] = llvm.alloca %[[CONST_1:.*]] x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
-//CHECK: %[[LOAD:.*]] = llvm.load %[[ALLOCA]] : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
-//CHECK: llvm.store %[[LOAD]], %[[ALLOCA_1]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>, !llvm.ptr
+//CHECK: %[[LOAD:.*]] = ptr.load %[[ALLOCA]] : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
+//CHECK: ptr.store %[[LOAD]], %[[ALLOCA_1]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>, !llvm.ptr
//CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ALLOCA_1]][0, 0] : (!llvm.ptr) -> !llvm.ptr
-//CHECK: %[[LOAD_2:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> !llvm.ptr
+//CHECK: %[[LOAD_2:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> !llvm.ptr
//CHECK: omp.terminator
//CHECK: }
@@ -852,13 +852,13 @@ func.func @sub_() {
omp.flush(%arg0, %arg1, %arg2 : !fir.ref<i32>, !fir.ref<i32>, !fir.ref<i32>)
// CHECK: omp.flush
omp.flush
-// CHECK: %[[A_VAL:.*]] = llvm.load %[[ARG_A]] : !llvm.ptr -> i32
+// CHECK: %[[A_VAL:.*]] = ptr.load %[[ARG_A]] : !llvm.ptr -> i32
%0 = fir.load %arg0 : !fir.ref<i32>
-// CHECK: %[[B_VAL:.*]] = llvm.load %[[ARG_B]] : !llvm.ptr -> i32
+// CHECK: %[[B_VAL:.*]] = ptr.load %[[ARG_B]] : !llvm.ptr -> i32
%1 = fir.load %arg1 : !fir.ref<i32>
// CHECK: %[[C_VAL:.*]] = llvm.add %[[A_VAL]], %[[B_VAL]] : i32
%2 = arith.addi %0, %1 : i32
-// CHECK: llvm.store %[[C_VAL]], %[[ARG_C]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[C_VAL]], %[[ARG_C]] : i32, !llvm.ptr
fir.store %2 to %arg2 : !fir.ref<i32>
// CHECK: omp.terminator
omp.terminator
@@ -880,13 +880,13 @@ func.func @omp_critical_() {
%1 = fir.alloca i32 {bindc_name = "y", uniq_name = "_QFomp_criticalEy"}
// CHECK: omp.critical(@help)
omp.critical(@help) {
-// CHECK: %[[X_VAL:.*]] = llvm.load %[[X_REF]] : !llvm.ptr -> i32
+// CHECK: %[[X_VAL:.*]] = ptr.load %[[X_REF]] : !llvm.ptr -> i32
%2 = fir.load %0 : !fir.ref<i32>
-// CHECK: %[[Y_VAL:.*]] = llvm.load %[[Y_REF]] : !llvm.ptr -> i32
+// CHECK: %[[Y_VAL:.*]] = ptr.load %[[Y_REF]] : !llvm.ptr -> i32
%3 = fir.load %1 : !fir.ref<i32>
// CHECK: %[[RESULT:.*]] = llvm.add %[[X_VAL]], %[[Y_VAL]] : i32
%4 = arith.addi %2, %3 : i32
-// CHECK: llvm.store %[[RESULT]], %[[X_REF]] : i32, !llvm.ptr
+// CHECK: ptr.store %[[RESULT]], %[[X_REF]] : i32, !llvm.ptr
fir.store %4 to %0 : !fir.ref<i32>
// CHECK: omp.terminator
omp.terminator
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index be82ffab7e33ef..de30c48881d297 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -217,7 +217,7 @@ func.func @test_alloc_and_freemem_one() {
// CHECK-LABEL: llvm.func @test_alloc_and_freemem_one() {
// CHECK-NEXT: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK-NEXT: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK-NEXT: %[[N:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK-NEXT: %[[N:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK-NEXT: llvm.call @malloc(%[[N]])
// CHECK: llvm.call @free(%{{.*}})
// CHECK-NEXT: llvm.return
@@ -236,7 +236,7 @@ func.func @test_alloc_and_freemem_several() {
// CHECK-LABEL: llvm.func @test_alloc_and_freemem_several() {
// CHECK: [[NULL:%.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: [[PTR:%.*]] = llvm.getelementptr [[NULL]][{{.*}}] : (!llvm.ptr) -> !llvm.ptr, !llvm.array<100 x f32>
-// CHECK: [[N:%.*]] = llvm.ptrtoint [[PTR]] : !llvm.ptr to i64
+// CHECK: [[N:%.*]] = ptr.ptrtoint [[PTR]] : !llvm.ptr to i64
// CHECK: [[MALLOC:%.*]] = llvm.call @malloc([[N]])
// CHECK: llvm.call @free([[MALLOC]])
// CHECK: llvm.return
@@ -252,7 +252,7 @@ func.func @test_with_shape(%ncols: index, %nrows: index) {
// CHECK-SAME: %[[NCOLS:.*]]: i64, %[[NROWS:.*]]: i64
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK: %[[FOUR:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK: %[[FOUR:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK: %[[DIM1_SIZE:.*]] = llvm.mul %[[FOUR]], %[[NCOLS]] : i64
// CHECK: %[[TOTAL_SIZE:.*]] = llvm.mul %[[DIM1_SIZE]], %[[NROWS]] : i64
// CHECK: %[[MEM:.*]] = llvm.call @malloc(%[[TOTAL_SIZE]])
@@ -270,7 +270,7 @@ func.func @test_string_with_shape(%len: index, %nelems: index) {
// CHECK-SAME: %[[LEN:.*]]: i64, %[[NELEMS:.*]]: i64)
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK: %[[ONE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK: %[[ONE:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK: %[[LEN_SIZE:.*]] = llvm.mul %[[ONE]], %[[LEN]] : i64
// CHECK: %[[TOTAL_SIZE:.*]] = llvm.mul %[[LEN_SIZE]], %[[NELEMS]] : i64
// CHECK: %[[MEM:.*]] = llvm.call @malloc(%[[TOTAL_SIZE]])
@@ -748,7 +748,7 @@ func.func @convert_from_int(%arg0 : i32) {
// CHECK: %{{.*}} = llvm.trunc %[[ARG0]] : i32 to i16
// CHECK-NOT: %{{.*}} = llvm.trunc %[[ARG0]] : i32 to i32
// CHECK: %{{.*}} = llvm.sext %[[ARG0]] : i32 to i64
-// CHECK: %{{.*}} = llvm.inttoptr %{{.*}} : i64 to !llvm.ptr
+// CHECK: %{{.*}} = ptr.inttoptr %{{.*}} : i64 to !llvm.ptr
func.func @convert_from_i1(%arg0 : i1) {
@@ -773,7 +773,7 @@ func.func @convert_from_ref(%arg0 : !fir.ref<i32>) {
// CHECK-LABEL: convert_from_ref(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr
// CHECK-NOT: %{{.*}} = llvm.bitcast %[[ARG0]] : !llvm.ptr to !llvm.ptr
-// CHECK: %{{.*}} = llvm.ptrtoint %[[ARG0]] : !llvm.ptr to i32
+// CHECK: %{{.*}} = ptr.ptrtoint %[[ARG0]] : !llvm.ptr to i32
// -----
@@ -847,7 +847,7 @@ func.func @test_constc8() -> !fir.complex<8> {
// -----
-// Test `fir.store` --> `llvm.store` conversion
+// Test `fir.store` --> `ptr.store` conversion
func.func @test_store_index(%val_to_store : index, %addr : !fir.ref<index>) {
fir.store %val_to_store to %addr : !fir.ref<index>
@@ -856,7 +856,7 @@ func.func @test_store_index(%val_to_store : index, %addr : !fir.ref<index>) {
// CHECK-LABEL: llvm.func @test_store_index
// CHECK-SAME: (%[[arg0:.*]]: i64, %[[arg1:.*]]: !llvm.ptr) {
-// CHECK-NEXT: llvm.store %[[arg0]], %[[arg1]] : i64, !llvm.ptr
+// CHECK-NEXT: ptr.store %[[arg0]], %[[arg1]] : i64, !llvm.ptr
// CHECK-NEXT: llvm.return
// CHECK-NEXT: }
@@ -868,8 +868,8 @@ func.func @test_store_box(%array : !fir.ref<!fir.box<!fir.array<?x?xf32>>>, %box
// CHECK-LABEL: llvm.func @test_store_box
// CHECK-SAME: (%[[arg0:.*]]: !llvm.ptr,
// CHECK-SAME: %[[arg1:.*]]: !llvm.ptr) {
-// CHECK-NEXT: %[[box_to_store:.*]] = llvm.load %arg1 : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i{{.*}}>>)>
-// CHECK-NEXT: llvm.store %[[box_to_store]], %[[arg0]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i{{.*}}>>)>, !llvm.ptr
+// CHECK-NEXT: %[[box_to_store:.*]] = ptr.load %arg1 : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i{{.*}}>>)>
+// CHECK-NEXT: ptr.store %[[box_to_store]], %[[arg0]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i{{.*}}>>)>, !llvm.ptr
// CHECK-NEXT: llvm.return
// CHECK-NEXT: }
@@ -882,19 +882,19 @@ func.func @store_unlimited_polymorphic_box(%arg0 : !fir.class<none>, %arg1 : !fi
return
}
// CHECK-LABEL: llvm.func @store_unlimited_polymorphic_box(
-// CHECK: %[[VAL_8:.*]] = llvm.load %{{.*}} : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>
-// CHECK: llvm.store %[[VAL_8]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
-// CHECK: %[[VAL_9:.*]] = llvm.load %{{.*}} : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i{{.*}}>>, ptr, array<1 x i{{.*}}>)>
-// CHECK: llvm.store %[[VAL_9]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i{{.*}}>>, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
-// CHECK: %[[VAL_10:.*]] = llvm.load %{{.*}} : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>
-// CHECK: llvm.store %[[VAL_10]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
-// CHECK: %[[VAL_11:.*]] = llvm.load %{{.*}}: !llvm.ptr
-// CHECK: llvm.store %[[VAL_11]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i{{.*}}>>, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
+// CHECK: %[[VAL_8:.*]] = ptr.load %{{.*}} : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>
+// CHECK: ptr.store %[[VAL_8]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
+// CHECK: %[[VAL_9:.*]] = ptr.load %{{.*}} : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i{{.*}}>>, ptr, array<1 x i{{.*}}>)>
+// CHECK: ptr.store %[[VAL_9]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i{{.*}}>>, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
+// CHECK: %[[VAL_10:.*]] = ptr.load %{{.*}} : !llvm.ptr -> !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>
+// CHECK: ptr.store %[[VAL_10]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
+// CHECK: %[[VAL_11:.*]] = ptr.load %{{.*}}: !llvm.ptr
+// CHECK: ptr.store %[[VAL_11]], %{{.*}} : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i{{.*}}>>, ptr, array<1 x i{{.*}}>)>, !llvm.ptr
// -----
-// Test `fir.load` --> `llvm.load` conversion
+// Test `fir.load` --> `ptr.load` conversion
func.func @test_load_index(%addr : !fir.ref<index>) {
%0 = fir.load %addr : !fir.ref<index>
@@ -903,7 +903,7 @@ func.func @test_load_index(%addr : !fir.ref<index>) {
// CHECK-LABEL: llvm.func @test_load_index(
// CHECK-SAME: %[[arg1:.*]]: !llvm.ptr) {
-// CHECK-NEXT: %0 = llvm.load %[[arg1]] : !llvm.ptr -> i64
+// CHECK-NEXT: %0 = ptr.load %[[arg1]] : !llvm.ptr -> i64
// CHECK-NEXT: llvm.return
// CHECK-NEXT: }
@@ -920,8 +920,8 @@ func.func @test_load_box(%addr : !fir.ref<!fir.box<!fir.array<10xf32>>>) {
// CHECK-SAME: %[[arg0:.*]]: !llvm.ptr) {
// CHECK-NEXT: %[[c1:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK-NEXT: %[[box_copy:.*]] = llvm.alloca %[[c1]] x !llvm.struct<([[DESC_TYPE:.*]])>
-// CHECK-NEXT: %[[box_val:.*]] = llvm.load %[[arg0]] : !llvm.ptr -> !llvm.struct<([[DESC_TYPE]])>
-// CHECK-NEXT: llvm.store %[[box_val]], %[[box_copy]] : !llvm.struct<([[DESC_TYPE]])>, !llvm.ptr
+// CHECK-NEXT: %[[box_val:.*]] = ptr.load %[[arg0]] : !llvm.ptr -> !llvm.struct<([[DESC_TYPE]])>
+// CHECK-NEXT: ptr.store %[[box_val]], %[[box_copy]] : !llvm.struct<([[DESC_TYPE]])>, !llvm.ptr
// CHECK-NEXT: llvm.call @takes_box(%[[box_copy]]) : (!llvm.ptr) -> ()
// CHECK-NEXT: llvm.return
// CHECK-NEXT: }
@@ -938,7 +938,7 @@ func.func @extract_rank(%arg0: !fir.box<!fir.array<*:f64>>) -> i32 {
// CHECK-LABEL: llvm.func @extract_rank(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[RANK:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+// CHECK: %[[RANK:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> i32
// CHECK: llvm.return %[[RANK]] : i32
// -----
@@ -953,7 +953,7 @@ func.func @extract_addr(%arg0: !fir.box<!fir.array<*:f64>>) -> !fir.ref<f64> {
// CHECK-LABEL: llvm.func @extract_addr(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[ADDR:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[ADDR:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> !llvm.ptr
// CHECK: llvm.return %[[ADDR]] : !llvm.ptr
// -----
@@ -971,11 +971,11 @@ func.func @extract_dims(%arg0: !fir.box<!fir.array<*:f64>>) -> index {
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i64
// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[C0]], 0] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[LOAD0:.*]] = llvm.load %[[GEP0]] : !llvm.ptr -> i64
+// CHECK: %[[LOAD0:.*]] = ptr.load %[[GEP0]] : !llvm.ptr -> i64
// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[C0]], 1] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[LOAD1:.*]] = llvm.load %[[GEP1]] : !llvm.ptr -> i64
+// CHECK: %[[LOAD1:.*]] = ptr.load %[[GEP1]] : !llvm.ptr -> i64
// CHECK: %[[GEP2:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[C0]], 2] : (!llvm.ptr, i32) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[LOAD2:.*]] = llvm.load %[[GEP2]] : !llvm.ptr -> i64
+// CHECK: %[[LOAD2:.*]] = ptr.load %[[GEP2]] : !llvm.ptr -> i64
// CHECK: llvm.return %[[LOAD0]] : i64
// -----
@@ -990,7 +990,7 @@ func.func @extract_elesize(%arg0: !fir.box<f32>) -> i32 {
// CHECK-LABEL: llvm.func @extract_elesize(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i32
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[ELE_SIZE:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+// CHECK: %[[ELE_SIZE:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> i32
// CHECK: llvm.return %[[ELE_SIZE]] : i32
// -----
@@ -1006,7 +1006,7 @@ func.func @box_isarray(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isarray(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i1
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[RANK:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+// CHECK: %[[RANK:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> i32
// CHECK: %[[C0_ISARRAY:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[IS_ARRAY:.*]] = llvm.icmp "ne" %[[RANK]], %[[C0_ISARRAY]] : i32
// CHECK: llvm.return %[[IS_ARRAY]] : i1
@@ -1025,7 +1025,7 @@ func.func @box_isalloc(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isalloc(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i1
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+// CHECK: %[[ATTR:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> i32
// CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(2 : i32) : i32
// CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32
// CHECK: %[[CMP_C0:.*]] = llvm.mlir.constant(0 : i32) : i32
@@ -1046,7 +1046,7 @@ func.func @box_isptr(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @box_isptr(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i1
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> i32
+// CHECK: %[[ATTR:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> i32
// CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32
// CHECK: %[[CMP_C0:.*]] = llvm.mlir.constant(0 : i32) : i32
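
Every box-inspection hunk above follows the same two-step pattern: a `llvm.getelementptr` into a descriptor field, then a load that now comes from the Ptr dialect. A condensed sketch of the pattern, where `%box` is a placeholder argument and field index 3 is the rank field in these descriptors (only the load's dialect changes; the GEP stays in the LLVM dialect):

  %fld  = llvm.getelementptr %box[0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
  %rank = ptr.load %fld : !llvm.ptr -> i32
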
@@ -1263,7 +1263,7 @@ func.func @select_case_integer(%arg0: !fir.ref<i32>) -> i32 {
// CHECK-LABEL: llvm.func @select_case_integer(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) -> i32 {
-// CHECK: %[[SELECT_VALUE:.*]] = llvm.load %[[ARG0]] : !llvm.ptr -> i32
+// CHECK: %[[SELECT_VALUE:.*]] = ptr.load %[[ARG0]] : !llvm.ptr -> i32
// CHECK: %[[CST1:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[CST2:.*]] = llvm.mlir.constant(2 : i32) : i32
// CHECK: %[[CST4:.*]] = llvm.mlir.constant(4 : i32) : i32
@@ -1322,7 +1322,7 @@ func.func @select_case_integer(%arg0: !fir.ref<i32>) -> i32 {
// CHECK: llvm.br ^bb14
// Block ^bb6 in original FIR code.
// CHECK-LABEL: ^bb14:
-// CHECK: %[[RET:.*]] = llvm.load %[[ARG0:.*]] : !llvm.ptr -> i32
+// CHECK: %[[RET:.*]] = ptr.load %[[ARG0:.*]] : !llvm.ptr -> i32
// CHECK: llvm.return %[[RET]] : i32
// -----
@@ -1349,7 +1349,7 @@ func.func @select_case_logical(%arg0: !fir.ref<!fir.logical<4>>) {
// CHECK-LABEL: llvm.func @select_case_logical(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr
-// CHECK: %[[LOAD_ARG0:.*]] = llvm.load %[[ARG0]] : !llvm.ptr -> i32
+// CHECK: %[[LOAD_ARG0:.*]] = ptr.load %[[ARG0]] : !llvm.ptr -> i32
// CHECK: %[[CST_ZERO:.*]] = llvm.mlir.constant(0 : i64) : i32
// CHECK: %[[SELECT_VALUE:.*]] = llvm.icmp "ne" %[[LOAD_ARG0]], %[[CST_ZERO]] : i32
// CHECK: %[[CST_FALSE:.*]] = llvm.mlir.constant(false) : i1
@@ -1380,7 +1380,7 @@ func.func @test_is_present_i64(%arg0: !fir.ref<i64>) -> () {
// CHECK-LABEL: @test_is_present_i64
// CHECK-SAME: (%[[arg:.*]]: !llvm.ptr)
// CHECK-NEXT: %[[constant:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK-NEXT: %[[ptr:.*]] = llvm.ptrtoint %[[arg]] : !llvm.ptr to i64
+// CHECK-NEXT: %[[ptr:.*]] = ptr.ptrtoint %[[arg]] : !llvm.ptr to i64
// CHECK-NEXT: %{{.*}} = llvm.icmp "ne" %[[ptr]], %[[constant]] : i64
// CHECK-NEXT: llvm.return
// CHECK-NEXT: }
@@ -1393,7 +1393,7 @@ func.func @test_is_present_box(%arg0: !fir.box<!fir.ref<i64>>) -> () {
// CHECK-LABEL: @test_is_present_box
// CHECK-SAME: (%[[arg:.*]]: !llvm.ptr)
// CHECK-NEXT: %[[constant:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK-NEXT: %[[ptr:.*]] = llvm.ptrtoint %[[arg]] : !llvm.ptr to i64
+// CHECK-NEXT: %[[ptr:.*]] = ptr.ptrtoint %[[arg]] : !llvm.ptr to i64
// CHECK-NEXT: %{{.*}} = llvm.icmp "ne" %[[ptr]], %[[constant]] : i64
// CHECK-NEXT: llvm.return
// CHECK-NEXT: }
@@ -1435,7 +1435,7 @@ func.func @is_present(%arg0: !fir.ref<i64>) -> i1 {
// CHECK-LABEL: @is_present
// CHECK-SAME: (%[[arg:.*]]: !llvm.ptr) -> i1
// CHECK-NEXT: %[[constant:.*]] = llvm.mlir.constant(0 : i64) : i64
-// CHECK-NEXT: %[[ptr:.*]] = llvm.ptrtoint %[[arg]] : !llvm.ptr to i64
+// CHECK-NEXT: %[[ptr:.*]] = ptr.ptrtoint %[[arg]] : !llvm.ptr to i64
// CHECK-NEXT: %[[ret_val:.*]] = llvm.icmp "ne" %[[ptr]], %[[constant]] : i64
// CHECK-NEXT: llvm.return %[[ret_val]] : i1
// CHECK-NEXT: }
@@ -1532,7 +1532,7 @@ func.func @box_tdesc(%arg0: !fir.box<!fir.type<dtdesc{a:i32}>>) {
// CHECK-LABEL: llvm.func @box_tdesc(
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) {
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 7] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)>
-// CHECK: %[[LOAD:.*]] = llvm.load %[[GEP]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[LOAD:.*]] = ptr.load %[[GEP]] : !llvm.ptr -> !llvm.ptr
// -----
@@ -1555,7 +1555,7 @@ func.func @embox0(%arg0: !fir.ref<!fir.array<100xi32>>) {
// CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK: %[[I64_ELEM_SIZE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK: %[[I64_ELEM_SIZE:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK: %[[DESC:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
// CHECK: %[[DESC0:.*]] = llvm.insertvalue %[[I64_ELEM_SIZE]], %[[DESC]][1] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
// CHECK: %[[CFI_VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
@@ -1572,7 +1572,7 @@ func.func @embox0(%arg0: !fir.ref<!fir.array<100xi32>>) {
// CHECK: %[[F18ADDENDUM_I8:.*]] = llvm.trunc %[[F18ADDENDUM]] : i32 to i8
// CHECK: %[[DESC5:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[DESC4]][6] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
// CHECK: %[[DESC6:.*]] = llvm.insertvalue %[[ARG0]], %[[DESC5]][0] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>
-// CHECK: llvm.store %[[DESC6]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>, !llvm.ptr
+// CHECK: ptr.store %[[DESC6]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>, !llvm.ptr
// Check `fir.embox` in a `fir.global`. Descriptors created by `fir.embox`
// conversion do not generate `alloca` instructions. This test makes sure of
@@ -1753,8 +1753,8 @@ func.func @no_reassoc(%arg0: !fir.ref<i32>) {
// CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr) {
// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[ALLOC:.*]] = llvm.alloca %[[C1]] x i32 : (i64) -> !llvm.ptr
-// CHECK: %[[LOAD:.*]] = llvm.load %[[ARG0]] : !llvm.ptr -> i32
-// CHECK: llvm.store %[[LOAD]], %[[ALLOC]] : i32, !llvm.ptr
+// CHECK: %[[LOAD:.*]] = ptr.load %[[ARG0]] : !llvm.ptr -> i32
+// CHECK: ptr.store %[[LOAD]], %[[ALLOC]] : i32, !llvm.ptr
// CHECK: llvm.return
// -----
@@ -1777,7 +1777,7 @@ func.func @xembox0(%arg0: !fir.ref<!fir.array<?xi32>>) {
// CHECK: %[[TYPE:.*]] = llvm.mlir.constant(9 : i32) : i32
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK: %[[ELEM_LEN_I64:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[BOX1:.*]] = llvm.insertvalue %[[ELEM_LEN_I64]], %[[BOX0]][1] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
@@ -1811,7 +1811,7 @@ func.func @xembox0(%arg0: !fir.ref<!fir.array<?xi32>>) {
// CHECK: %[[PREV_PTROFF:.*]] = llvm.mul %[[ONE]], %[[C0]] : i64
// CHECK: %[[BASE_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[PTR_OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32
// CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[BASE_PTR]], %[[BOX9]][0] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK: llvm.store %[[BOX10]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>, !llvm.ptr
+// CHECK: ptr.store %[[BOX10]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>, !llvm.ptr
// Check adjustment of element scaling factor.
@@ -1825,7 +1825,7 @@ func.func @xembox1(%arg0: !fir.ref<!fir.array<?x!fir.char<1, 10>>>) {
// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK: %[[ELEM_LEN_I64:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK: %{{.*}} = llvm.insertvalue %[[ELEM_LEN_I64]], %{{.*}}[1] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[PREV_PTROFF:.*]] = llvm.mul %[[ELEM_LEN_I64]], %[[C0]] : i64
@@ -1875,7 +1875,7 @@ func.func private @_QPxb(!fir.box<!fir.array<?x?xf64>>)
// CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(28 : i32) : i32
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK: %[[ELEM_LEN_I64:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[BOX1:.*]] = llvm.insertvalue %[[ELEM_LEN_I64]], %[[BOX0]][1] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
@@ -1921,7 +1921,7 @@ func.func private @_QPxb(!fir.box<!fir.array<?x?xf64>>)
// CHECK: %[[BOX12:.*]] = llvm.insertvalue %[[STRIDE_MUL]], %[[BOX11]][7, 1, 2] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
// CHECK: %[[BASE_PTR:.*]] = llvm.getelementptr %[[ARR]][%[[PTR_OFFSET0]]] : (!llvm.ptr, i64) -> !llvm.ptr, f64
// CHECK: %[[BOX13:.*]] = llvm.insertvalue %[[BASE_PTR]], %[[BOX12]][0] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
-// CHECK: llvm.store %[[BOX13]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>, !llvm.ptr
+// CHECK: ptr.store %[[BOX13]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>, !llvm.ptr
// Conversion with a subcomponent.
@@ -1953,7 +1953,7 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
// CHECK: %[[TYPE_CODE:.*]] = llvm.mlir.constant(9 : i32) : i32
// CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+// CHECK: %[[ELEM_LEN_I64:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
// CHECK: %[[BOX0:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[BOX1:.*]] = llvm.insertvalue %[[ELEM_LEN_I64]], %[[BOX0]][1] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
@@ -1973,7 +1973,7 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[ELE_TYPE:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"_QFtest_dt_sliceTt", (i32, i32)>
-// CHECK: %[[PTRTOINT_DTYPE_SIZE:.*]] = llvm.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr to i64
+// CHECK: %[[PTRTOINT_DTYPE_SIZE:.*]] = ptr.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr to i64
// CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C1]], %[[ONE]] : i64
// CHECK: %[[EXT_SUB:.*]] = llvm.sub %[[C10]], %[[C1]] : i64
// CHECK: %[[EXT_ADD:.*]] = llvm.add %[[EXT_SUB]], %[[C2]] : i64
@@ -1986,7 +1986,7 @@ func.func private @_QPtest_dt_callee(%arg0: !fir.box<!fir.array<?xi32>>)
// CHECK: %[[BOX9:.*]] = llvm.insertvalue %[[STRIDE_MUL]], %[[BOX8]][7, 0, 2] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
// CHECK: %[[BASE_PTR:.*]] = llvm.getelementptr %[[X]][%[[ZERO]], %[[ADJUSTED_OFFSET]], 0] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<20 x struct<"_QFtest_dt_sliceTt", (i32, i32)>>
// CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[BASE_PTR]], %[[BOX9]][0] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK: llvm.store %[[BOX10]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>, !llvm.ptr
+// CHECK: ptr.store %[[BOX10]], %[[ALLOCA]] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>, !llvm.ptr
// CHECK: llvm.call @_QPtest_dt_callee(%1) : (!llvm.ptr) -> ()
// Conversion with a subcomponent that indexes a 2d array field in a derived type.
@@ -2089,11 +2089,11 @@ func.func @ext_array_coor3(%arg0: !fir.box<!fir.array<?xi32>>) {
// CHECK: %[[IDX:.*]] = llvm.sub %[[C0]], %[[C1]] overflow<nsw> : i64
// CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C1]] overflow<nsw> : i64
// CHECK: %[[GEPSTRIDE:.*]] = llvm.getelementptr %[[ARG0]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK: %[[LOADEDSTRIDE:.*]] = llvm.load %[[GEPSTRIDE]] : !llvm.ptr -> i64
+// CHECK: %[[LOADEDSTRIDE:.*]] = ptr.load %[[GEPSTRIDE]] : !llvm.ptr -> i64
// CHECK: %[[SC:.*]] = llvm.mul %[[DIFF0]], %[[LOADEDSTRIDE]] overflow<nsw> : i64
// CHECK: %[[OFFSET:.*]] = llvm.add %[[SC]], %[[C0_1]] overflow<nsw> : i64
// CHECK: %[[GEPADDR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK: %[[LOADEDADDR:.*]] = llvm.load %[[GEPADDR]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[LOADEDADDR:.*]] = ptr.load %[[GEPADDR]] : !llvm.ptr -> !llvm.ptr
// CHECK: %[[GEPADDROFFSET:.*]] = llvm.getelementptr %[[LOADEDADDR]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
// Conversion with non zero shift and slice.
@@ -2254,7 +2254,7 @@ func.func @test_rebox_1(%arg0: !fir.box<!fir.array<?x?xf32>>) {
//CHECK: %[[FLOAT_TYPE:.*]] = llvm.mlir.constant(27 : i32) : i32
//CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
//CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-//CHECK: %[[ELEM_SIZE_I64:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+//CHECK: %[[ELEM_SIZE_I64:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
//CHECK: %[[RBOX:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[RBOX_TMP1:.*]] = llvm.insertvalue %[[ELEM_SIZE_I64]], %[[RBOX]][1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[CFI_VERSION:.*]] = llvm.mlir.constant(20180515 : i32) : i32
@@ -2271,11 +2271,11 @@ func.func @test_rebox_1(%arg0: !fir.box<!fir.array<?x?xf32>>) {
//CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8
//CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[DIM1_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-//CHECK: %[[DIM1_STRIDE:.*]] = llvm.load %[[DIM1_STRIDE_REF]] : !llvm.ptr -> i64
+//CHECK: %[[DIM1_STRIDE:.*]] = ptr.load %[[DIM1_STRIDE_REF]] : !llvm.ptr -> i64
//CHECK: %[[DIM2_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][0, 7, 1, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-//CHECK: %[[DIM2_STRIDE:.*]] = llvm.load %[[DIM2_STRIDE_REF]] : !llvm.ptr -> i64
+//CHECK: %[[DIM2_STRIDE:.*]] = ptr.load %[[DIM2_STRIDE_REF]] : !llvm.ptr -> i64
//CHECK: %[[SOURCE_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-//CHECK: %[[SOURCE_ARRAY:.*]] = llvm.load %[[SOURCE_ARRAY_PTR]] : !llvm.ptr -> !llvm.ptr
+//CHECK: %[[SOURCE_ARRAY:.*]] = ptr.load %[[SOURCE_ARRAY_PTR]] : !llvm.ptr -> !llvm.ptr
//CHECK: %[[ZERO_ELEMS:.*]] = llvm.mlir.constant(0 : i64) : i64
//CHECK: %[[DIM1_LB_DIFF:.*]] = llvm.sub %[[FIVE]], %[[THREE]] : i64
//CHECK: %[[DIM1_LB_OFFSET:.*]] = llvm.mul %[[DIM1_LB_DIFF]], %[[DIM1_STRIDE]] : i64
@@ -2294,7 +2294,7 @@ func.func @test_rebox_1(%arg0: !fir.box<!fir.array<?x?xf32>>) {
//CHECK: %[[RBOX_TMP7_2:.*]] = llvm.insertvalue %[[RESULT_NELEMS]], %[[RBOX_TMP7_1]][7, 0, 1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[RBOX_TMP7_3:.*]] = llvm.insertvalue %[[RESULT_STRIDE]], %[[RBOX_TMP7_2]][7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[RESULT_BOX:.*]] = llvm.insertvalue %[[RESULT_PTR]], %[[RBOX_TMP7_3]][0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
-//CHECK: llvm.store %[[RESULT_BOX]], %[[RESULT_BOX_REF]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+//CHECK: ptr.store %[[RESULT_BOX]], %[[RESULT_BOX_REF]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
//CHECK: llvm.call @bar1(%[[RESULT_BOX_REF]]) : (!llvm.ptr) -> ()
@@ -2325,7 +2325,7 @@ func.func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}
//CHECK: %[[TYPE_CHAR:.*]] = llvm.mlir.constant(40 : i32) : i32
//CHECK: %[[NULL:.*]] = llvm.mlir.zero : !llvm.ptr
//CHECK: %[[GEP:.*]] = llvm.getelementptr %[[NULL]][1]
-//CHECK: %[[CHAR_SIZE:.*]] = llvm.ptrtoint %[[GEP]] : !llvm.ptr to i64
+//CHECK: %[[CHAR_SIZE:.*]] = ptr.ptrtoint %[[GEP]] : !llvm.ptr to i64
//CHECK: %[[ELEM_SIZE:.*]] = llvm.mul %[[CHAR_SIZE]], %[[ELEM_COUNT]]
//CHECK: %[[RBOX_TMP1:.*]] = llvm.insertvalue %[[ELEM_SIZE]], %{{.*}}[1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[RBOX_TMP2:.*]] = llvm.insertvalue %{{.*}}, %[[RBOX_TMP1]][2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
@@ -2339,9 +2339,9 @@ func.func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}
//CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8
//CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[SRC_STRIDE_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-//CHECK: %[[SRC_STRIDE:.*]] = llvm.load %[[SRC_STRIDE_PTR]] : !llvm.ptr -> i64
+//CHECK: %[[SRC_STRIDE:.*]] = ptr.load %[[SRC_STRIDE_PTR]] : !llvm.ptr -> i64
//CHECK: %[[SRC_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-//CHECK: %[[SRC_ARRAY:.*]] = llvm.load %[[SRC_ARRAY_PTR]] : !llvm.ptr -> !llvm.ptr
+//CHECK: %[[SRC_ARRAY:.*]] = ptr.load %[[SRC_ARRAY_PTR]] : !llvm.ptr -> !llvm.ptr
//CHECK: %[[ZERO_6:.*]] = llvm.mlir.constant(0 : i64) : i64
//CHECK: %[[COMPONENT:.*]] = llvm.getelementptr %[[SRC_ARRAY]][%[[ZERO_6]], 1, %[[COMPONENT_OFFSET_1]]] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.struct<"t", (i32, array<10 x i8>)>
//CHECK: %[[SRC_LB:.*]] = llvm.mlir.constant(1 : i64) : i64
@@ -2359,7 +2359,7 @@ func.func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}
//CHECK: %[[RBOX_TMP7_2:.*]] = llvm.insertvalue %[[RESULT_NELEMS]], %[[RBOX_TMP7_1]][7, 0, 1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[RBOX_TMP7_3:.*]] = llvm.insertvalue %[[RESULT_TOTAL_STRIDE]], %[[RBOX_TMP7_2]][7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
//CHECK: %[[RESULT_BOX:.*]] = llvm.insertvalue %[[RESULT_PTR]], %[[RBOX_TMP7_3]][0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
-//CHECK: llvm.store %[[RESULT_BOX]], %[[RESULT_BOX_REF]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
+//CHECK: ptr.store %[[RESULT_BOX]], %[[RESULT_BOX_REF]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>, !llvm.ptr
//CHECK: llvm.call @bar(%[[RESULT_BOX_REF]]) : (!llvm.ptr) -> ()
//CHECK: llvm.return
//CHECK: }
@@ -2408,7 +2408,7 @@ func.func @coordinate_box_derived_1(%arg0: !fir.box<!fir.type<derived_1{field_1:
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr)
// CHECK: %[[COORDINATE:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>
-// CHECK: %[[DERIVED_VAL:.*]] = llvm.load %[[DERIVED_ADDR]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[DERIVED_VAL:.*]] = ptr.load %[[DERIVED_ADDR]] : !llvm.ptr -> !llvm.ptr
// CHECK: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[DERIVED_VAL]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"derived_1", (i32, i32)>
// CHECK-NEXT: llvm.return
@@ -2425,7 +2425,7 @@ func.func @coordinate_box_derived_2(%arg0: !fir.box<!fir.type<derived_2{field_1:
// CHECK-NEXT: %[[C0_0:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}32, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>
-// CHECK-NEXT: %[[DERIVED_VAL:.*]] = llvm.load %[[DERIVED_ADDR]] : !llvm.ptr -> !llvm.ptr
+// CHECK-NEXT: %[[DERIVED_VAL:.*]] = ptr.load %[[DERIVED_ADDR]] : !llvm.ptr -> !llvm.ptr
// CHECK-NEXT: %[[ANOTHER_DERIVED_ADDR:.*]] = llvm.getelementptr %[[DERIVED_VAL]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"derived_2", (struct<"another_derived", (i32, f32)>, i32)>
// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ANOTHER_DERIVED_ADDR]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<"another_derived", (i32, f32)>
// CHECK-NEXT: llvm.return
@@ -2447,11 +2447,11 @@ func.func @coordinate_box_array_1d(%arg0: !fir.box<!fir.array<10 x f32>>, %arg1:
// CHECK-SAME: %[[COORDINATE:.*]]: i64
// There's only one box here. Its index is `0`. Generate it.
// CHECK: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr -> !llvm.ptr
+// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = ptr.load %[[ARRAY_ADDR]] : !llvm.ptr -> !llvm.ptr
// CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64
// Index of the 1st CFI_dim_t object (corresponds to the 1st dimension)
// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
+// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = ptr.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
// CHECK-NEXT: %[[BYTE_OFFSET:.*]] = llvm.mul %[[COORDINATE]], %[[DIM_1_MEM_STRIDE_VAL]] overflow<nsw> : i64
// CHECK-NEXT: %[[SUBOJECT_OFFSET:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] overflow<nsw> : i64
// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ARRAY_OBJECT]][%[[SUBOJECT_OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
@@ -2466,11 +2466,11 @@ func.func @coordinate_of_box_dynamic_array_1d(%arg0: !fir.box<!fir.array<? x f32
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr
// CHECK-SAME: %[[COORDINATE:.*]]: i64
// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr -> !llvm.ptr
+// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = ptr.load %[[ARRAY_ADDR]] : !llvm.ptr -> !llvm.ptr
// CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64
// Index of the 1st CFI_dim_t object (corresponds to the 1st dimension)
// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
+// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = ptr.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
// CHECK-NEXT: %[[BYTE_OFFSET:.*]] = llvm.mul %[[COORDINATE]], %[[DIM_1_MEM_STRIDE_VAL]] overflow<nsw> : i64
// CHECK-NEXT: %[[SUBOJECT_OFFSET:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] overflow<nsw> : i64
// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ARRAY_OBJECT]][%[[SUBOJECT_OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
@@ -2487,16 +2487,16 @@ func.func @coordinate_box_array_2d(%arg0: !fir.box<!fir.array<10 x 10 x f32>>, %
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr
// CHECK-SAME: %[[COORDINATE_1:.*]]: i64, %[[COORDINATE_2:.*]]: i64)
// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
-// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr -> !llvm.ptr
+// CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = ptr.load %[[ARRAY_ADDR]] : !llvm.ptr -> !llvm.ptr
// CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64
// Index of the 1st CFI_dim_t object (corresponds to the 1st dimension)
// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
-// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
+// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = ptr.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
// CHECK-NEXT: %[[BYTE_OFFSET_1:.*]] = llvm.mul %[[COORDINATE_1]], %[[DIM_1_MEM_STRIDE_VAL]] overflow<nsw> : i64
// CHECK-NEXT: %[[SUBOBJECT_OFFSET_1:.*]] = llvm.add %[[BYTE_OFFSET_1]], %[[OFFSET_INIT]] overflow<nsw> : i64
// Index of the 2nd CFI_dim_t object (corresponds to the 2nd dimension)
// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, 1, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>
-// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_2_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
+// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_VAL:.*]] = ptr.load %[[DIM_2_MEM_STRIDE_ADDR]] : !llvm.ptr -> i64
// CHECK-NEXT: %[[BYTE_OFFSET_2:.*]] = llvm.mul %[[COORDINATE_2]], %[[DIM_2_MEM_STRIDE_VAL]] overflow<nsw> : i64
// CHECK-NEXT: %[[SUBOBJECT_OFFSET_2:.*]] = llvm.add %[[BYTE_OFFSET_2]], %[[SUBOBJECT_OFFSET_1]] overflow<nsw> : i64
// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ARRAY_OBJECT]][%[[SUBOBJECT_OFFSET_2]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
@@ -2516,10 +2516,10 @@ func.func @coordinate_box_derived_inside_array(%arg0: !fir.box<!fir.array<10 x !
// CHECK-SAME: %[[BOX:.*]]: !llvm.ptr,
// CHECK-SAME: %[[COORDINATE_1:.*]]: i64) {
// CHECK: %[[VAL_6:.*]] = llvm.getelementptr %[[BOX]]{{\[}}0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[ARRAY:.*]] = llvm.load %[[VAL_6]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[ARRAY:.*]] = ptr.load %[[VAL_6]] : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_8:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_13:.*]] = llvm.getelementptr %[[BOX]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_14:.*]] = llvm.load %[[VAL_13]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_14:.*]] = ptr.load %[[VAL_13]] : !llvm.ptr -> i64
// CHECK: %[[VAL_15:.*]] = llvm.mul %[[COORDINATE_1]], %[[VAL_14]] overflow<nsw> : i64
// CHECK: %[[OFFSET:.*]] = llvm.add %[[VAL_15]], %[[VAL_8]] overflow<nsw> : i64
// CHECK: %[[DERIVED:.*]] = llvm.getelementptr %[[ARRAY]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
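
Taken together, the hunks above apply one mechanical rewrite: `llvm.load`, `llvm.store`, and `llvm.ptrtoint` become `ptr.load`, `ptr.store`, and `ptr.ptrtoint`, with operands, result types, and attributes left untouched. A minimal before/after sketch, using the op syntax shown in the CHECK lines (`%p`, `%q`, and `%v` are placeholders, not values from any test above):

  // Before: memory ops spelled in the LLVM dialect.
  %v = llvm.load %p : !llvm.ptr -> i32
  llvm.store %v, %q : i32, !llvm.ptr
  %i = llvm.ptrtoint %p : !llvm.ptr to i64

  // After: the same ops now come from the Ptr dialect.
  %v = ptr.load %p : !llvm.ptr -> i32
  ptr.store %v, %q : i32, !llvm.ptr
  %i = ptr.ptrtoint %p : !llvm.ptr to i64
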
diff --git a/flang/test/Fir/embox-char.fir b/flang/test/Fir/embox-char.fir
index 30015a5f7ae380..f505fde95e4dae 100644
--- a/flang/test/Fir/embox-char.fir
+++ b/flang/test/Fir/embox-char.fir
@@ -20,29 +20,29 @@
// CHECK: %[[VAL_11:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: %[[VAL_12:.*]] = llvm.mlir.constant(1 : index) : i64
// CHECK: %[[VAL_13_WIDTH:.*]] = llvm.mlir.constant(4 : index) : i64
-// CHECK: %[[VAL_14:.*]] = llvm.load %[[VAL_0]] : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: llvm.store %[[VAL_14]], %[[VAL_10]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
+// CHECK: %[[VAL_14:.*]] = ptr.load %[[VAL_0]] : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
+// CHECK: ptr.store %[[VAL_14]], %[[VAL_10]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
// CHECK: %[[VAL_15:.*]] = llvm.getelementptr %[[VAL_10]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_16_BYTESIZE:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_16_BYTESIZE:.*]] = ptr.load %[[VAL_15]] : !llvm.ptr -> i64
// CHECK: %[[VAL_17:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_12]], 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_18_LB1:.*]] = llvm.load %[[VAL_17]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_18_LB1:.*]] = ptr.load %[[VAL_17]] : !llvm.ptr -> i64
// CHECK: %[[VAL_19:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_12]], 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_20_EX1:.*]] = llvm.load %[[VAL_19]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_20_EX1:.*]] = ptr.load %[[VAL_19]] : !llvm.ptr -> i64
// CHECK: %[[VAL_21:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_12]], 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_22_ST1:.*]] = llvm.load %[[VAL_21]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_22_ST1:.*]] = ptr.load %[[VAL_21]] : !llvm.ptr -> i64
// CHECK: %[[VAL_23:.*]] = llvm.getelementptr %[[VAL_10]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_24_BASEPTR:.*]] = llvm.load %[[VAL_23]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_24_BASEPTR:.*]] = ptr.load %[[VAL_23]] : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_25:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_11]], 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_26_LB0:.*]] = llvm.load %[[VAL_25]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_26_LB0:.*]] = ptr.load %[[VAL_25]] : !llvm.ptr -> i64
// CHECK: %[[VAL_27:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_11]], 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_28_EX0:.*]] = llvm.load %[[VAL_27]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_28_EX0:.*]] = ptr.load %[[VAL_27]] : !llvm.ptr -> i64
// CHECK: %[[VAL_29:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_11]], 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_30_ST0:.*]] = llvm.load %[[VAL_29]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_30_ST0:.*]] = ptr.load %[[VAL_29]] : !llvm.ptr -> i64
// CHECK: %[[VAL_31_LEN:.*]] = llvm.sdiv %[[VAL_16_BYTESIZE]], %[[VAL_13_WIDTH]] : i64
// CHECK: %[[VAL_32:.*]] = llvm.mlir.constant(44 : i32) : i32
// CHECK: %[[VAL_33:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[VAL_34:.*]] = llvm.getelementptr %[[VAL_33]][1] : (!llvm.ptr) -> !llvm.ptr, i32
-// CHECK: %[[VAL_35:.*]] = llvm.ptrtoint %[[VAL_34]] : !llvm.ptr to i64
+// CHECK: %[[VAL_35:.*]] = ptr.ptrtoint %[[VAL_34]] : !llvm.ptr to i64
// CHECK: %[[VAL_36_BYTESIZE:.*]] = llvm.mul %[[VAL_35]], %[[VAL_31_LEN]] : i64
// CHECK: %[[VAL_37:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
// CHECK: %[[VAL_38:.*]] = llvm.insertvalue %[[VAL_36_BYTESIZE]], %[[VAL_37]][1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
@@ -91,7 +91,7 @@
// CHECK: %[[VAL_81:.*]] = llvm.mul %[[VAL_67]], %[[VAL_20_EX1]] : i64
// CHECK: %[[VAL_82:.*]] = llvm.getelementptr %[[VAL_24_BASEPTR]]{{\[}}%[[VAL_70_OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i32
// CHECK: %[[VAL_84:.*]] = llvm.insertvalue %[[VAL_82]], %[[VAL_79]][0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: llvm.store %[[VAL_84]], %[[VAL_8]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
+// CHECK: ptr.store %[[VAL_84]], %[[VAL_8]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
// CHECK: llvm.return
// CHECK: }
func.func @test_char4(%arg0: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?x!fir.char<4,?>>>>>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index) {
@@ -117,28 +117,28 @@ func.func @test_char4(%arg0: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?x!fir.cha
// CHECK: %[[VAL_10:.*]] = llvm.alloca %[[VAL_9]] x !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)> {alignment = 8 : i64} : (i32) -> !llvm.ptr
// CHECK: %[[VAL_11:.*]] = llvm.mlir.constant(0 : index) : i64
// CHECK: %[[VAL_12_c1:.*]] = llvm.mlir.constant(1 : index) : i64
-// CHECK: %[[VAL_14:.*]] = llvm.load %[[VAL_0]] : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: llvm.store %[[VAL_14]], %[[VAL_10]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
+// CHECK: %[[VAL_14:.*]] = ptr.load %[[VAL_0]] : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
+// CHECK: ptr.store %[[VAL_14]], %[[VAL_10]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
// CHECK: %[[VAL_15:.*]] = llvm.getelementptr %[[VAL_10]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_16_BYTESIZE:.*]] = llvm.load %[[VAL_15]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_16_BYTESIZE:.*]] = ptr.load %[[VAL_15]] : !llvm.ptr -> i64
// CHECK: %[[VAL_17:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_12]], 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_18_LB1:.*]] = llvm.load %[[VAL_17]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_18_LB1:.*]] = ptr.load %[[VAL_17]] : !llvm.ptr -> i64
// CHECK: %[[VAL_19:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_12]], 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_20_EX1:.*]] = llvm.load %[[VAL_19]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_20_EX1:.*]] = ptr.load %[[VAL_19]] : !llvm.ptr -> i64
// CHECK: %[[VAL_21:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_12]], 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_22_ST1:.*]] = llvm.load %[[VAL_21]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_22_ST1:.*]] = ptr.load %[[VAL_21]] : !llvm.ptr -> i64
// CHECK: %[[VAL_23:.*]] = llvm.getelementptr %[[VAL_10]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_24_BASEPTR:.*]] = llvm.load %[[VAL_23]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_24_BASEPTR:.*]] = ptr.load %[[VAL_23]] : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_25:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_11]], 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_26_LB0:.*]] = llvm.load %[[VAL_25]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_26_LB0:.*]] = ptr.load %[[VAL_25]] : !llvm.ptr -> i64
// CHECK: %[[VAL_27:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_11]], 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_28_EX0:.*]] = llvm.load %[[VAL_27]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_28_EX0:.*]] = ptr.load %[[VAL_27]] : !llvm.ptr -> i64
// CHECK: %[[VAL_29:.*]] = llvm.getelementptr %[[VAL_10]][0, 7, %[[VAL_11]], 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: %[[VAL_30_ST0:.*]] = llvm.load %[[VAL_29]] : !llvm.ptr -> i64
+// CHECK: %[[VAL_30_ST0:.*]] = ptr.load %[[VAL_29]] : !llvm.ptr -> i64
// CHECK: %[[VAL_32:.*]] = llvm.mlir.constant(40 : i32) : i32
// CHECK: %[[VAL_33:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[VAL_34:.*]] = llvm.getelementptr %[[VAL_33]][1] : (!llvm.ptr) -> !llvm.ptr, i8
-// CHECK: %[[VAL_35:.*]] = llvm.ptrtoint %[[VAL_34]] : !llvm.ptr to i64
+// CHECK: %[[VAL_35:.*]] = ptr.ptrtoint %[[VAL_34]] : !llvm.ptr to i64
// CHECK: %[[VAL_36_BYTESIZE:.*]] = llvm.mul %[[VAL_35]], %[[VAL_16_BYTESIZE]] : i64
// CHECK: %[[VAL_37:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
// CHECK: %[[VAL_38:.*]] = llvm.insertvalue %[[VAL_36_BYTESIZE]], %[[VAL_37]][1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
@@ -187,7 +187,7 @@ func.func @test_char4(%arg0: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?x!fir.cha
// CHECK: %[[VAL_81:.*]] = llvm.mul %[[VAL_67]], %[[VAL_20_EX1]] : i64
// CHECK: %[[VAL_82:.*]] = llvm.getelementptr %[[VAL_24_BASEPTR]]{{\[}}%[[VAL_70_OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
// CHECK: %[[VAL_84:.*]] = llvm.insertvalue %[[VAL_82]], %[[VAL_79]][0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>
-// CHECK: llvm.store %[[VAL_84]], %[[VAL_8]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
+// CHECK: ptr.store %[[VAL_84]], %[[VAL_8]] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>, !llvm.ptr
// CHECK: llvm.return
// CHECK: }
func.func @test_char1(%arg0: !fir.ref<!fir.box<!fir.heap<!fir.array<?x?x!fir.char<1,?>>>>>, %arg1 : index, %arg2 : index, %arg3 : index, %arg4 : index, %arg5 : index, %arg6 : index) {
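
The embox-char tests above also exercise the null-base GEP idiom FIR codegen uses to materialize an element size in bytes; only the final pointer-to-integer conversion moves to the Ptr dialect. A sketch of the idiom, with placeholder names and element type i32 as in the char<4> case:

  // GEP from a null base yields the byte size of one element as an address;
  // ptrtoint (now in the Ptr dialect) turns that address into an i64.
  %null = llvm.mlir.zero : !llvm.ptr
  %gep  = llvm.getelementptr %null[1] : (!llvm.ptr) -> !llvm.ptr, i32
  %size = ptr.ptrtoint %gep : !llvm.ptr to i64
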
diff --git a/flang/test/Fir/embox-substring.fir b/flang/test/Fir/embox-substring.fir
index f2042f9bda7fc4..882c1b2a2255ce 100644
--- a/flang/test/Fir/embox-substring.fir
+++ b/flang/test/Fir/embox-substring.fir
@@ -32,7 +32,7 @@ func.func private @dump(!fir.box<!fir.array<2x!fir.char<1>>>)
// CHECK: llvm.getelementptr
// CHECK: %[[VAL_28:.*]] = llvm.mlir.zero : !llvm.ptr
// CHECK: %[[VAL_29:.*]] = llvm.getelementptr %[[VAL_28]][1] : (!llvm.ptr) -> !llvm.ptr, i8
-// CHECK: %[[VAL_30:.*]] = llvm.ptrtoint %[[VAL_29]] : !llvm.ptr to i64
+// CHECK: %[[VAL_30:.*]] = ptr.ptrtoint %[[VAL_29]] : !llvm.ptr to i64
// CHECK: %[[VAL_31:.*]] = llvm.mul %[[VAL_30]], %[[VAL_1]] : i64
// CHECK: %[[VAL_42:.*]] = llvm.mul %[[VAL_31]], %[[VAL_5]] : i64
// CHECK: %[[VAL_43:.*]] = llvm.insertvalue %[[VAL_42]], %{{.*}}[7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
diff --git a/flang/test/Fir/rebox-susbtring.fir b/flang/test/Fir/rebox-susbtring.fir
index 8f7f4facd13a27..d9c744e3ae40bc 100644
--- a/flang/test/Fir/rebox-susbtring.fir
+++ b/flang/test/Fir/rebox-susbtring.fir
@@ -21,7 +21,7 @@ func.func @char_section(%arg0: !fir.box<!fir.array<?x!fir.char<1,20>>>) {
// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[VAL_37:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<{{.*}}>
-// CHECK: %[[VAL_38:.*]] = llvm.load %[[VAL_37]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_38:.*]] = ptr.load %[[VAL_37]] : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_30:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_40:.*]] = llvm.getelementptr %[[VAL_38]]{{\[}}%[[VAL_30]], %[[VAL_4]]] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.array<20 x i8>
@@ -51,7 +51,7 @@ func.func @foo(%arg0: !fir.box<!fir.array<?x!fir.type<t{i:i32,c:!fir.char<1,10>}
// CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i32) : i32
// CHECK: %[[VAL_30:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<{{.*}}>
-// CHECK: %[[VAL_31:.*]] = llvm.load %[[VAL_30]] : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_31:.*]] = ptr.load %[[VAL_30]] : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_33:.*]] = llvm.getelementptr %[[VAL_31]]{{\[}}%[[VAL_21]], 1, %[[VAL_4]]] : (!llvm.ptr, i64, i64) -> !llvm.ptr, !llvm.struct<{{.*}}>
diff --git a/flang/test/Fir/tbaa.fir b/flang/test/Fir/tbaa.fir
index 4474bbbe3dd747..9fc647ca5e16b4 100644
--- a/flang/test/Fir/tbaa.fir
+++ b/flang/test/Fir/tbaa.fir
@@ -37,21 +37,21 @@ module {
// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(10 : i32) : i32
// CHECK: %[[VAL_6:.*]] = llvm.getelementptr %[[VAL_0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_7:.*]] = llvm.load %[[VAL_6]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_7:.*]] = ptr.load %[[VAL_6]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_8:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_9:.*]] = llvm.getelementptr %[[VAL_0]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_10:.*]] = llvm.load %[[VAL_9]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_10:.*]] = ptr.load %[[VAL_9]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_11:.*]] = llvm.mul %[[VAL_4]], %[[VAL_10]] overflow<nsw> : i64
// CHECK: %[[VAL_12:.*]] = llvm.add %[[VAL_11]], %[[VAL_8]] overflow<nsw> : i64
// CHECK: %[[VAL_14:.*]] = llvm.getelementptr %[[VAL_7]]{{\[}}%[[VAL_12]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
// CHECK: %[[VAL_16:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_17:.*]] = llvm.mlir.constant(-1 : i32) : i32
// CHECK: %[[VAL_18:.*]] = llvm.getelementptr %[[VAL_0]][0, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_19:.*]] = llvm.load %[[VAL_18]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_19:.*]] = ptr.load %[[VAL_18]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_20:.*]] = llvm.getelementptr %[[VAL_0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_21:.*]] = llvm.load %[[VAL_20]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_21:.*]] = ptr.load %[[VAL_20]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_22:.*]] = llvm.getelementptr %[[VAL_0]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_23:.*]] = llvm.load %[[VAL_22]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_23:.*]] = ptr.load %[[VAL_22]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
// CHECK: %[[VAL_24:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
// CHECK: %[[VAL_25:.*]] = llvm.insertvalue %[[VAL_21]], %[[VAL_24]][1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
// CHECK: %[[VAL_26:.*]] = llvm.mlir.constant(20180515 : i32) : i32
@@ -69,15 +69,15 @@ module {
// CHECK: %[[VAL_38:.*]] = llvm.insertvalue %[[VAL_37]], %[[VAL_35]][6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
// CHECK: %[[VAL_40:.*]] = llvm.insertvalue %[[VAL_19]], %[[VAL_38]][7] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
// CHECK: %[[VAL_42:.*]] = llvm.insertvalue %[[VAL_14]], %[[VAL_40]][0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
-// CHECK: llvm.store %[[VAL_42]], %[[VAL_2]] {tbaa = [#[[$BOXT]]]} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>, !llvm.ptr
+// CHECK: ptr.store %[[VAL_42]], %[[VAL_2]] {tbaa = [#[[$BOXT]]]} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>, !llvm.ptr
// CHECK: %[[VAL_43:.*]] = llvm.getelementptr %[[VAL_2]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_44:.*]] = llvm.load %[[VAL_43]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i8
+// CHECK: %[[VAL_44:.*]] = ptr.load %[[VAL_43]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i8
// CHECK: %[[VAL_45:.*]] = llvm.icmp "eq" %[[VAL_44]], %[[VAL_3]] : i8
// CHECK: llvm.cond_br %[[VAL_45]], ^bb1, ^bb2
// CHECK: ^bb1:
// CHECK: %[[VAL_46:.*]] = llvm.getelementptr %[[VAL_2]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_47:.*]] = llvm.load %[[VAL_46]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
-// CHECK: llvm.store %[[VAL_5]], %[[VAL_47]] {tbaa = [#[[$DATAT]]]} : i32, !llvm.ptr
+// CHECK: %[[VAL_47:.*]] = ptr.load %[[VAL_46]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
+// CHECK: ptr.store %[[VAL_5]], %[[VAL_47]] {tbaa = [#[[$DATAT]]]} : i32, !llvm.ptr
// CHECK: llvm.br ^bb2
// CHECK: ^bb2:
// CHECK: llvm.return
@@ -133,24 +133,24 @@ module {
// CHECK: %[[VAL_7:.*]] = llvm.mlir.addressof @_QFEx : !llvm.ptr
// CHECK: %[[VAL_8:.*]] = llvm.mlir.addressof @_QQclX2E2F64756D6D792E66393000 : !llvm.ptr
// CHECK: %[[VAL_10:.*]] = llvm.call @_FortranAioBeginExternalListOutput(%[[VAL_6]], %[[VAL_8]], %[[VAL_5]]) {fastmathFlags = #llvm.fastmath<contract>} : (i32, !llvm.ptr, i32) -> !llvm.ptr
-// CHECK: %[[VAL_11:.*]] = llvm.load %[[VAL_7]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: llvm.store %[[VAL_11]], %[[VAL_3]] {tbaa = [#[[$BOXT]]]} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>, !llvm.ptr
+// CHECK: %[[VAL_11:.*]] = ptr.load %[[VAL_7]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
+// CHECK: ptr.store %[[VAL_11]], %[[VAL_3]] {tbaa = [#[[$BOXT]]]} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>, !llvm.ptr
// CHECK: %[[VAL_12:.*]] = llvm.getelementptr %[[VAL_3]][0, 7, %[[VAL_4]], 0] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_13:.*]] = llvm.load %[[VAL_12]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_13:.*]] = ptr.load %[[VAL_12]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_14:.*]] = llvm.getelementptr %[[VAL_3]][0, 7, %[[VAL_4]], 1] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_15:.*]] = llvm.load %[[VAL_14]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_15:.*]] = ptr.load %[[VAL_14]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_16:.*]] = llvm.getelementptr %[[VAL_3]][0, 7, %[[VAL_4]], 2] : (!llvm.ptr, i64) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_17:.*]] = llvm.load %[[VAL_16]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_17:.*]] = ptr.load %[[VAL_16]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_18:.*]] = llvm.getelementptr %[[VAL_3]][0, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_19:.*]] = llvm.load %[[VAL_18]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_19:.*]] = ptr.load %[[VAL_18]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_20:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_21:.*]] = llvm.mlir.constant(-1 : i32) : i32
// CHECK: %[[VAL_22:.*]] = llvm.getelementptr %[[VAL_3]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_23:.*]] = llvm.load %[[VAL_22]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_23:.*]] = ptr.load %[[VAL_22]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_24:.*]] = llvm.getelementptr %[[VAL_3]][0, 4] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_25:.*]] = llvm.load %[[VAL_24]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_25:.*]] = ptr.load %[[VAL_24]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
// CHECK: %[[VAL_26:.*]] = llvm.getelementptr %[[VAL_3]][0, 8] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_27:.*]] = llvm.load %[[VAL_26]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_27:.*]] = ptr.load %[[VAL_26]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_28:.*]] = llvm.mlir.undef : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_29:.*]] = llvm.insertvalue %[[VAL_23]], %[[VAL_28]][1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_30:.*]] = llvm.mlir.constant(20180515 : i32) : i32
@@ -168,13 +168,13 @@ module {
// CHECK: %[[VAL_42:.*]] = llvm.insertvalue %[[VAL_41]], %[[VAL_39]][6] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_44:.*]] = llvm.insertvalue %[[VAL_27]], %[[VAL_42]][8] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_45:.*]] = llvm.getelementptr %[[VAL_3]][0, 7, 0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_46:.*]] = llvm.load %[[VAL_45]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_46:.*]] = ptr.load %[[VAL_45]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_47:.*]] = llvm.getelementptr %[[VAL_3]][0, 7, 0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_48:.*]] = llvm.load %[[VAL_47]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_48:.*]] = ptr.load %[[VAL_47]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_49:.*]] = llvm.getelementptr %[[VAL_3]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_50:.*]] = llvm.load %[[VAL_49]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_50:.*]] = ptr.load %[[VAL_49]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_51:.*]] = llvm.getelementptr %[[VAL_3]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: %[[VAL_52:.*]] = llvm.load %[[VAL_51]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_52:.*]] = ptr.load %[[VAL_51]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_53:.*]] = llvm.mlir.constant(0 : i64) : i64
// CHECK: %[[VAL_54:.*]] = llvm.mlir.constant(1 : i64) : i64
// CHECK: %[[VAL_55:.*]] = llvm.icmp "eq" %[[VAL_48]], %[[VAL_53]] : i64
@@ -183,7 +183,7 @@ module {
// CHECK: %[[VAL_58:.*]] = llvm.insertvalue %[[VAL_48]], %[[VAL_57]][7, 0, 1] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_59:.*]] = llvm.insertvalue %[[VAL_50]], %[[VAL_58]][7, 0, 2] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
// CHECK: %[[VAL_61:.*]] = llvm.insertvalue %[[VAL_52]], %[[VAL_59]][0] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>
-// CHECK: llvm.store %[[VAL_61]], %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>, !llvm.ptr
+// CHECK: ptr.store %[[VAL_61]], %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>, !llvm.ptr
// CHECK: %[[VAL_63:.*]] = llvm.call @_FortranAioOutputDescriptor(%[[VAL_10]], %[[VAL_1]]) {fastmathFlags = #llvm.fastmath<contract>} : (!llvm.ptr, !llvm.ptr) -> i1
// CHECK: %[[VAL_64:.*]] = llvm.call @_FortranAioEndIoStatement(%[[VAL_10]]) {fastmathFlags = #llvm.fastmath<contract>} : (!llvm.ptr) -> i32
// CHECK: llvm.return
@@ -245,7 +245,7 @@ func.func @tbaa(%arg0: !fir.box<!fir.array<*:f64>>) -> i32 {
// CHECK-LABEL: llvm.func @tbaa(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr) -> i32 {
// CHECK: %[[VAL_1:.*]] = llvm.getelementptr %[[VAL_0]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
-// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_2:.*]] = ptr.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
// CHECK: llvm.return %[[VAL_2]] : i32
// CHECK: }
@@ -264,7 +264,7 @@ func.func @tbaa(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @tbaa(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr) -> i1 {
// CHECK: %[[VAL_1:.*]] = llvm.getelementptr %[[VAL_0]][0, 3] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
-// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_2:.*]] = ptr.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i32) : i32
// CHECK: %[[VAL_4:.*]] = llvm.icmp "ne" %[[VAL_2]], %[[VAL_3]] : i32
// CHECK: llvm.return %[[VAL_4]] : i1
@@ -285,7 +285,7 @@ func.func @tbaa(%arg0: !fir.box<f32>) -> i32 {
// CHECK-LABEL: llvm.func @tbaa(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr) -> i32 {
// CHECK: %[[VAL_1:.*]] = llvm.getelementptr %[[VAL_0]][0, 1] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
-// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_2:.*]] = ptr.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
// CHECK: llvm.return %[[VAL_2]] : i32
// CHECK: }
@@ -304,7 +304,7 @@ func.func @tbaa(%arg0: !fir.box<!fir.array<*:f64>>) -> i1 {
// CHECK-LABEL: llvm.func @tbaa(
// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr) -> i1 {
// CHECK: %[[VAL_1:.*]] = llvm.getelementptr %[[VAL_0]][0, 5] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8)>
-// CHECK: %[[VAL_2:.*]] = llvm.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
+// CHECK: %[[VAL_2:.*]] = ptr.load %[[VAL_1]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i32
// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(2 : i32) : i32
// CHECK: %[[VAL_4:.*]] = llvm.and %[[VAL_2]], %[[VAL_3]] : i32
// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(0 : i32) : i32
@@ -333,11 +333,11 @@ func.func @tbaa(%arg0: !fir.box<!fir.array<?xi32>>) {
// CHECK: %[[VAL_4:.*]] = llvm.sub %[[VAL_1]], %[[VAL_2]] overflow<nsw> : i64
// CHECK: %[[VAL_5:.*]] = llvm.mul %[[VAL_4]], %[[VAL_2]] overflow<nsw> : i64
// CHECK: %[[VAL_6:.*]] = llvm.getelementptr %[[VAL_0]][0, 7, 0, 2] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
-// CHECK: %[[VAL_7:.*]] = llvm.load %[[VAL_6]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
+// CHECK: %[[VAL_7:.*]] = ptr.load %[[VAL_6]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> i64
// CHECK: %[[VAL_8:.*]] = llvm.mul %[[VAL_5]], %[[VAL_7]] overflow<nsw> : i64
// CHECK: %[[VAL_9:.*]] = llvm.add %[[VAL_8]], %[[VAL_3]] overflow<nsw> : i64
// CHECK: %[[VAL_10:.*]] = llvm.getelementptr %[[VAL_0]][0, 0] : (!llvm.ptr) -> !llvm.ptr, !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>
-// CHECK: %[[VAL_11:.*]] = llvm.load %[[VAL_10]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
+// CHECK: %[[VAL_11:.*]] = ptr.load %[[VAL_10]] {tbaa = [#[[$BOXT]]]} : !llvm.ptr -> !llvm.ptr
// CHECK: %[[VAL_13:.*]] = llvm.getelementptr %[[VAL_11]]{{\[}}%[[VAL_9]]] : (!llvm.ptr, i64) -> !llvm.ptr, i8
// CHECK: llvm.return
// CHECK: }
@@ -356,8 +356,8 @@ func.func @tbaa(%arg0: !fir.ref<!fir.type<_QMtypesTt{x:!fir.box<!fir.heap<f32>>}
return
}
// CHECK-LABEL: llvm.func @tbaa(
-// CHECK: llvm.load{{.*}}{tbaa = [#[[$ANYT]]]}
-// CHECK: llvm.store{{.*}}{tbaa = [#[[$ANYT]]]}
+// CHECK: ptr.load{{.*}}{tbaa = [#[[$ANYT]]]}
+// CHECK: ptr.store{{.*}}{tbaa = [#[[$ANYT]]]}
// -----
@@ -373,5 +373,5 @@ func.func @tbaa(%arg0: !fir.ref<!fir.array<2x!fir.type<_QMtypesTt{x:!fir.box<!fi
return
}
// CHECK-LABEL: llvm.func @tbaa(
-// CHECK: llvm.load{{.*}}{tbaa = [#[[$ANYT]]]}
-// CHECK: llvm.store{{.*}}{tbaa = [#[[$ANYT]]]}
+// CHECK: ptr.load{{.*}}{tbaa = [#[[$ANYT]]]}
+// CHECK: ptr.store{{.*}}{tbaa = [#[[$ANYT]]]}
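
The tbaa.fir hunks show that discardable attributes ride along unchanged when the op switches dialects; the CHECK lines only swap the mnemonic. A minimal sketch, with the hypothetical `#tbaa_tag` standing in for the `#[[$BOXT]]`/`#[[$DATAT]]`/`#[[$ANYT]]` tags the test defines elsewhere:

  %v = ptr.load %p {tbaa = [#tbaa_tag]} : !llvm.ptr -> i32
  ptr.store %v, %q {tbaa = [#tbaa_tag]} : i32, !llvm.ptr
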
diff --git a/flang/test/Lower/OpenMP/FIR/flush.f90 b/flang/test/Lower/OpenMP/FIR/flush.f90
index 2c281632b85cb0..a8e8be8ab3aba9 100644
--- a/flang/test/Lower/OpenMP/FIR/flush.f90
+++ b/flang/test/Lower/OpenMP/FIR/flush.f90
@@ -33,10 +33,10 @@ subroutine flush_parallel(a, b, c)
!FIRDialect: %{{.*}} = arith.addi %{{.*}}, %{{.*}} : i32
!FIRDialect: fir.store %{{.*}} to %{{.*}} : !fir.ref<i32>
-!LLVMIRDialect: %{{.*}} = llvm.load %{{.*}} : !llvm.ptr -> i32
-!LLVMIRDialect: %{{.*}} = llvm.load %{{.*}} : !llvm.ptr -> i32
+!LLVMIRDialect: %{{.*}} = ptr.load %{{.*}} : !llvm.ptr -> i32
+!LLVMIRDialect: %{{.*}} = ptr.load %{{.*}} : !llvm.ptr -> i32
!LLVMIRDialect: %{{.*}} = llvm.add %{{.*}}, %{{.*}} : i32
-!LLVMIRDialect: llvm.store %{{.*}}, %{{.*}} : i32, !llvm.ptr
+!LLVMIRDialect: ptr.store %{{.*}}, %{{.*}} : i32, !llvm.ptr
c = a + b
!OMPDialect: omp.terminator