[flang-commits] [flang] [flang] Characterize allocation based on MemAlloc effect instead of pattern matching (PR #166806)

Susan Tan スーザン タン via flang-commits flang-commits at lists.llvm.org
Thu Nov 6 09:27:44 PST 2025


https://github.com/SusanTan updated https://github.com/llvm/llvm-project/pull/166806

From 05e4d390b46f5078c82a6f399c75cf1b034a7927 Mon Sep 17 00:00:00 2001
From: Susan Tan <zujunt at nvidia.com>
Date: Wed, 5 Nov 2025 19:13:01 -0800
Subject: [PATCH 1/5] first implementation

---
 .../lib/Optimizer/Analysis/AliasAnalysis.cpp  | 37 +++++++++++++++++++
 .../AliasAnalysis/cuf-alloc-source-kind.mlir  | 22 +++++++++++
 2 files changed, 59 insertions(+)
 create mode 100644 flang/test/Analysis/AliasAnalysis/cuf-alloc-source-kind.mlir

diff --git a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
index 73ddd1ff80126..2fabf8d7e95bf 100644
--- a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
+++ b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
@@ -22,6 +22,7 @@
 #include "llvm/ADT/TypeSwitch.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/Debug.h"
+#include "mlir/Interfaces/ViewLikeInterface.h"
 
 using namespace mlir;
 
@@ -535,6 +536,42 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
   mlir::Operation *instantiationPoint{nullptr};
   while (defOp && !breakFromLoop) {
     ty = defOp->getResultTypes()[0];
+
+    // Effect-based detection using op-scoped Allocate with conservative
+    // heuristics (ignore value-scoped signals per request).
+    if (auto memIface = llvm::dyn_cast<mlir::MemoryEffectOpInterface>(defOp)) {
+      llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> effects;
+      memIface.getEffects(effects);
+      bool sawOpScopedAlloc = false;
+      for (auto &ei : effects) {
+        bool isAlloc = mlir::isa<mlir::MemoryEffects::Allocate>(ei.getEffect());
+        if (!ei.getValue() && isAlloc) {
+          sawOpScopedAlloc = true;
+        }
+      }
+      if (sawOpScopedAlloc) {
+        auto isMemoryRefLikeType = [](mlir::Type t) {
+          return fir::isa_ref_type(t) || mlir::isa<mlir::BaseMemRefType>(t) ||
+                 mlir::isa<mlir::LLVM::LLVMPointerType>(t);
+        };
+        bool opIsViewLike = (bool)mlir::dyn_cast_or_null<mlir::ViewLikeOpInterface>(defOp);
+        bool hasMemOperands = llvm::any_of(defOp->getOperands(), [&](mlir::Value opnd) {
+          return isMemoryRefLikeType(opnd.getType());
+        });
+        if (!opIsViewLike && !hasMemOperands) {
+          for (mlir::Value res : defOp->getResults()) {
+            if (res == v && isMemoryRefLikeType(res.getType())) {
+              type = SourceKind::Allocate;
+              breakFromLoop = true;
+              break;
+            }
+          }
+          if (breakFromLoop)
+            break;
+        }
+      }
+    }
+
     llvm::TypeSwitch<Operation *>(defOp)
         .Case<hlfir::AsExprOp>([&](auto op) {
           v = op.getVar();
diff --git a/flang/test/Analysis/AliasAnalysis/cuf-alloc-source-kind.mlir b/flang/test/Analysis/AliasAnalysis/cuf-alloc-source-kind.mlir
new file mode 100644
index 0000000000000..6a911f8ff25e3
--- /dev/null
+++ b/flang/test/Analysis/AliasAnalysis/cuf-alloc-source-kind.mlir
@@ -0,0 +1,22 @@
+// REQUIRES: asserts
+// RUN: fir-opt %s -pass-pipeline='builtin.module(func.func(test-fir-alias-analysis))' -debug-only=fir-alias-analysis --mlir-disable-threading 2>&1 | FileCheck %s
+
+// Verify that a CUF allocation is recognized as SourceKind::Allocate by
+// fir::AliasAnalysis::getSource.
+
+module {
+  func.func @_QQmain() attributes {fir.bindc_name = "TEST"} {
+    // Allocate two independent device arrays and tag the results; with
+    // op-scoped MemAlloc handling in AA, these should be classified as
+    // Allocate and not alias.
+    %a = cuf.alloc !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a1", data_attr = #cuf.cuda<device>, uniq_name = "_QFEa1", test.ptr = "cuf_alloc_a"} -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+    %b = cuf.alloc !fir.box<!fir.heap<!fir.array<?xf32>>> {bindc_name = "a2", data_attr = #cuf.cuda<device>, uniq_name = "_QFEa2", test.ptr = "cuf_alloc_b"} -> !fir.ref<!fir.box<!fir.heap<!fir.array<?xf32>>>>
+    return
+  }
+}
+
+// CHECK-LABEL: Testing : "_QQmain"
+// Distinct allocations should not alias.
+// CHECK: cuf_alloc_a#0 <-> cuf_alloc_b#0: NoAlias
+
+

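The core of this first patch is to ask MLIR's side-effect interface whether the defining op reports an op-scoped MemAlloc effect, instead of pattern matching on op names. A minimal standalone sketch of that query, built only from the interface calls already used in the diff above (the helper name is illustrative and not part of the PR):

  #include "llvm/ADT/STLExtras.h"
  #include "llvm/ADT/SmallVector.h"
  #include "mlir/Interfaces/SideEffectInterfaces.h"

  // Illustrative sketch: true if `op` advertises an op-scoped Allocate
  // effect, i.e. a MemAlloc effect not tied to any particular value.
  static bool hasOpScopedAllocEffect(mlir::Operation *op) {
    auto iface = llvm::dyn_cast<mlir::MemoryEffectOpInterface>(op);
    if (!iface)
      return false;
    llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> effects;
    iface.getEffects(effects);
    return llvm::any_of(effects,
                        [](const mlir::MemoryEffects::EffectInstance &e) {
                          return !e.getValue() &&
                                 mlir::isa<mlir::MemoryEffects::Allocate>(
                                     e.getEffect());
                        });
  }

The new test exercises this through the test-fir-alias-analysis pass; it can be run with llvm-lit from an assertions-enabled build (hence the REQUIRES: asserts line), e.g. llvm-lit -v flang/test/Analysis/AliasAnalysis/cuf-alloc-source-kind.mlir, with paths adjusted to the local build layout.
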
From 0295bd12cfa1036e9569cbbe752668cfbb9265a3 Mon Sep 17 00:00:00 2001
From: Susan Tan <zujunt at nvidia.com>
Date: Wed, 5 Nov 2025 19:37:10 -0800
Subject: [PATCH 2/5] replace the main memalloc

---
 .../lib/Optimizer/Analysis/AliasAnalysis.cpp  | 91 ++++++++++++++-----
 1 file changed, 68 insertions(+), 23 deletions(-)

diff --git a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
index 2fabf8d7e95bf..f3496de360849 100644
--- a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
+++ b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
@@ -534,6 +534,16 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
   mlir::SymbolRefAttr global;
   Source::Attributes attributes;
   mlir::Operation *instantiationPoint{nullptr};
+  // Helper to conservatively classify a candidate value as coming from a
+  // dummy argument or as indirect when no allocation or global can be proven.
+  auto classifyFallbackFrom = [&](mlir::Value candidate) {
+    if (isDummyArgument(candidate)) {
+      defOp = nullptr;
+      v = candidate;
+    } else {
+      type = SourceKind::Indirect;
+    }
+  };
   while (defOp && !breakFromLoop) {
     ty = defOp->getResultTypes()[0];
 
@@ -578,22 +588,14 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
           defOp = v.getDefiningOp();
         })
         .Case<hlfir::AssociateOp>([&](auto op) {
+          // Do not pattern-match Allocate. Trace through the source.
           mlir::Value source = op.getSource();
-          if (fir::isa_trivial(source.getType())) {
-            // Trivial values will always use distinct temp memory,
-            // so we can classify this as Allocate and stop.
-            type = SourceKind::Allocate;
-            breakFromLoop = true;
-          } else {
-            // AssociateOp may reuse the expression storage,
-            // so we have to trace further.
-            v = source;
-            defOp = v.getDefiningOp();
-          }
+          v = source;
+          defOp = v.getDefiningOp();
         })
         .Case<fir::AllocaOp, fir::AllocMemOp>([&](auto op) {
-          // Unique memory allocation.
-          type = SourceKind::Allocate;
+          // Do not pattern-match allocations by op name; rely on memory
+          // effects classification above. Nothing to do here.
           breakFromLoop = true;
         })
         .Case<fir::ConvertOp>([&](auto op) {
@@ -665,17 +667,60 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
               type = SourceKind::Global;
             } else {
               auto def = llvm::cast<mlir::Value>(boxSrc.origin.u);
-              // TODO: Add support to fir.allocmem
-              if (auto allocOp = def.template getDefiningOp<fir::AllocaOp>()) {
-                v = def;
-                defOp = v.getDefiningOp();
-                type = SourceKind::Allocate;
-              } else if (isDummyArgument(def)) {
-                defOp = nullptr;
-                v = def;
-              } else {
-                type = SourceKind::Indirect;
+              bool classified = false;
+              if (auto defDefOp = def.getDefiningOp()) {
+                if (auto defIface =
+                        llvm::dyn_cast<mlir::MemoryEffectOpInterface>(defDefOp)) {
+                  llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> eff;
+                  defIface.getEffects(eff);
+                  // Prefer value-scoped Allocate on the underlying storage.
+                  for (auto &e : eff) {
+                    if (mlir::isa<mlir::MemoryEffects::Allocate>(e.getEffect()) &&
+                        e.getValue() && e.getValue() == def) {
+                      v = def;
+                      defOp = v.getDefiningOp();
+                      type = SourceKind::Allocate;
+                      classified = true;
+                      break;
+                    }
+                  }
+                  // Heuristic for op-scoped Allocate at the underlying defining op.
+                  if (!classified) {
+                    bool sawOpScopedAlloc = llvm::any_of(
+                        eff, [](auto &e) {
+                          return !e.getValue() &&
+                                 mlir::isa<mlir::MemoryEffects::Allocate>(
+                                     e.getEffect());
+                        });
+                    if (sawOpScopedAlloc) {
+                      auto isMemoryRefLikeType = [](mlir::Type t) {
+                        return fir::isa_ref_type(t) ||
+                               mlir::isa<mlir::BaseMemRefType>(t) ||
+                               mlir::isa<mlir::LLVM::LLVMPointerType>(t);
+                      };
+                      bool opIsViewLike = (bool)mlir::dyn_cast_or_null<
+                          mlir::ViewLikeOpInterface>(defDefOp);
+                      bool hasMemOperands = llvm::any_of(
+                          defDefOp->getOperands(), [&](mlir::Value opnd) {
+                            return isMemoryRefLikeType(opnd.getType());
+                          });
+                      if (!opIsViewLike && !hasMemOperands) {
+                        for (mlir::Value res : defDefOp->getResults()) {
+                          if (res == def && isMemoryRefLikeType(res.getType())) {
+                            v = def;
+                            defOp = v.getDefiningOp();
+                            type = SourceKind::Allocate;
+                            classified = true;
+                            break;
+                          }
+                        }
+                      }
+                    }
+                  }
+                }
               }
+              if (!classified)
+                classifyFallbackFrom(def);
             }
             breakFromLoop = true;
             return;

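The second patch extends the same idea to the storage underlying a box: when tracing through the box source, an Allocate effect attached to that storage value (a value-scoped MemAlloc) is accepted first, and only then does the op-scoped heuristic apply. In isolation, the value-scoped check looks like the following sketch (same interface calls as in the diff; the function name is made up for illustration):

  // Illustrative sketch: true if `op` reports an Allocate effect tied
  // specifically to `candidate`, i.e. a value-scoped MemAlloc.
  static bool allocatesValue(mlir::Operation *op, mlir::Value candidate) {
    auto iface = llvm::dyn_cast<mlir::MemoryEffectOpInterface>(op);
    if (!iface)
      return false;
    llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> effects;
    iface.getEffects(effects);
    return llvm::any_of(effects,
                        [&](const mlir::MemoryEffects::EffectInstance &e) {
                          return e.getValue() == candidate &&
                                 mlir::isa<mlir::MemoryEffects::Allocate>(
                                     e.getEffect());
                        });
  }
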
From 4e64702e18ed3b0037a3b3f5a2aede4a7055b36d Mon Sep 17 00:00:00 2001
From: Susan Tan <zujunt at nvidia.com>
Date: Thu, 6 Nov 2025 08:53:46 -0800
Subject: [PATCH 3/5] refactor

---
 .../lib/Optimizer/Analysis/AliasAnalysis.cpp  | 169 +++++++++---------
 1 file changed, 81 insertions(+), 88 deletions(-)

diff --git a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
index f3496de360849..94f9ec5892c58 100644
--- a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
+++ b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
@@ -28,6 +28,67 @@ using namespace mlir;
 
 #define DEBUG_TYPE "fir-alias-analysis"
 
+//===----------------------------------------------------------------------===//
+// AliasAnalysis: alias helpers
+//===----------------------------------------------------------------------===//
+
+static bool tryClassifyAllocateFromEffects(mlir::Operation *op,
+    mlir::Value candidate, bool allowValueScoped, bool allowOpScoped,
+    mlir::Value &v, mlir::Operation *&defOp,
+    fir::AliasAnalysis::SourceKind &type) {
+  auto iface = llvm::dyn_cast<mlir::MemoryEffectOpInterface>(op);
+  if (!iface)
+    return false;
+
+  llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> effects;
+  iface.getEffects(effects);
+
+  if (allowValueScoped) {
+    for (mlir::MemoryEffects::EffectInstance &e : effects) {
+      if (mlir::isa<mlir::MemoryEffects::Allocate>(e.getEffect()) &&
+          e.getValue() && e.getValue() == candidate) {
+        v = candidate;
+        defOp = op;
+        type = fir::AliasAnalysis::SourceKind::Allocate;
+        return true;
+      }
+    }
+  }
+
+  if (!allowOpScoped)
+    return false;
+
+  bool hasOpScopedAlloc = llvm::any_of(
+      effects, [](const mlir::MemoryEffects::EffectInstance &e) {
+        return !e.getValue() &&
+               mlir::isa<mlir::MemoryEffects::Allocate>(e.getEffect());
+      });
+  if (!hasOpScopedAlloc)
+    return false;
+
+  bool opIsViewLike =
+      (bool)mlir::dyn_cast_or_null<mlir::ViewLikeOpInterface>(op);
+  auto isMemoryRefLikeType = [](mlir::Type type) {
+    return fir::isa_ref_type(type) || mlir::isa<mlir::BaseMemRefType>(type) ||
+           mlir::isa<mlir::LLVM::LLVMPointerType>(type);
+  };
+  bool hasMemOperands = llvm::any_of(op->getOperands(), [&](mlir::Value o) {
+    return isMemoryRefLikeType(o.getType());
+  });
+  if (opIsViewLike || hasMemOperands)
+    return false;
+
+  for (mlir::Value res : op->getResults()) {
+    if (res == candidate && isMemoryRefLikeType(res.getType())) {
+      v = candidate;
+      defOp = op;
+      type = fir::AliasAnalysis::SourceKind::Allocate;
+      return true;
+    }
+  }
+  return false;
+}
+
 //===----------------------------------------------------------------------===//
 // AliasAnalysis: alias
 //===----------------------------------------------------------------------===//
@@ -544,43 +605,22 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
       type = SourceKind::Indirect;
     }
   };
+
+  // Helper to detect memory-ref-like types.
+  auto isMemoryRefLikeType = [](mlir::Type t) {
+    return fir::isa_ref_type(t) || mlir::isa<mlir::BaseMemRefType>(t) ||
+           mlir::isa<mlir::LLVM::LLVMPointerType>(t);
+  };
+
   while (defOp && !breakFromLoop) {
     ty = defOp->getResultTypes()[0];
 
-    // Effect-based detection using op-scoped Allocate with conservative
-    // heuristics (ignore value-scoped signals per request).
-    if (auto memIface = llvm::dyn_cast<mlir::MemoryEffectOpInterface>(defOp)) {
-      llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> effects;
-      memIface.getEffects(effects);
-      bool sawOpScopedAlloc = false;
-      for (auto &ei : effects) {
-        bool isAlloc = mlir::isa<mlir::MemoryEffects::Allocate>(ei.getEffect());
-        if (!ei.getValue() && isAlloc) {
-          sawOpScopedAlloc = true;
-        }
-      }
-      if (sawOpScopedAlloc) {
-        auto isMemoryRefLikeType = [](mlir::Type t) {
-          return fir::isa_ref_type(t) || mlir::isa<mlir::BaseMemRefType>(t) ||
-                 mlir::isa<mlir::LLVM::LLVMPointerType>(t);
-        };
-        bool opIsViewLike = (bool)mlir::dyn_cast_or_null<mlir::ViewLikeOpInterface>(defOp);
-        bool hasMemOperands = llvm::any_of(defOp->getOperands(), [&](mlir::Value opnd) {
-          return isMemoryRefLikeType(opnd.getType());
-        });
-        if (!opIsViewLike && !hasMemOperands) {
-          for (mlir::Value res : defOp->getResults()) {
-            if (res == v && isMemoryRefLikeType(res.getType())) {
-              type = SourceKind::Allocate;
-              breakFromLoop = true;
-              break;
-            }
-          }
-          if (breakFromLoop)
-            break;
-        }
-      }
-    }
+    // Effect-based detection (op-scoped heuristic only at this level).
+    if (tryClassifyAllocateFromEffects(defOp, v,
+                                       /*allowValueScoped=*/false,
+                                       /*allowOpScoped=*/true,
+                                       v, defOp, type))
+      break;
 
     llvm::TypeSwitch<Operation *>(defOp)
         .Case<hlfir::AsExprOp>([&](auto op) {
@@ -666,61 +706,14 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
             if (global) {
               type = SourceKind::Global;
             } else {
-              auto def = llvm::cast<mlir::Value>(boxSrc.origin.u);
+              mlir::Value def = llvm::cast<mlir::Value>(boxSrc.origin.u);
               bool classified = false;
-              if (auto defDefOp = def.getDefiningOp()) {
-                if (auto defIface =
-                        llvm::dyn_cast<mlir::MemoryEffectOpInterface>(defDefOp)) {
-                  llvm::SmallVector<mlir::MemoryEffects::EffectInstance, 4> eff;
-                  defIface.getEffects(eff);
-                  // Prefer value-scoped Allocate on the underlying storage.
-                  for (auto &e : eff) {
-                    if (mlir::isa<mlir::MemoryEffects::Allocate>(e.getEffect()) &&
-                        e.getValue() && e.getValue() == def) {
-                      v = def;
-                      defOp = v.getDefiningOp();
-                      type = SourceKind::Allocate;
-                      classified = true;
-                      break;
-                    }
-                  }
-                  // Heuristic for op-scoped Allocate at the underlying defining op.
-                  if (!classified) {
-                    bool sawOpScopedAlloc = llvm::any_of(
-                        eff, [](auto &e) {
-                          return !e.getValue() &&
-                                 mlir::isa<mlir::MemoryEffects::Allocate>(
-                                     e.getEffect());
-                        });
-                    if (sawOpScopedAlloc) {
-                      auto isMemoryRefLikeType = [](mlir::Type t) {
-                        return fir::isa_ref_type(t) ||
-                               mlir::isa<mlir::BaseMemRefType>(t) ||
-                               mlir::isa<mlir::LLVM::LLVMPointerType>(t);
-                      };
-                      bool opIsViewLike = (bool)mlir::dyn_cast_or_null<
-                          mlir::ViewLikeOpInterface>(defDefOp);
-                      bool hasMemOperands = llvm::any_of(
-                          defDefOp->getOperands(), [&](mlir::Value opnd) {
-                            return isMemoryRefLikeType(opnd.getType());
-                          });
-                      if (!opIsViewLike && !hasMemOperands) {
-                        for (mlir::Value res : defDefOp->getResults()) {
-                          if (res == def && isMemoryRefLikeType(res.getType())) {
-                            v = def;
-                            defOp = v.getDefiningOp();
-                            type = SourceKind::Allocate;
-                            classified = true;
-                            break;
-                          }
-                        }
-                      }
-                    }
-                  }
-                }
-              }
-              if (!classified)
-                classifyFallbackFrom(def);
+              if (auto defDefOp = def.getDefiningOp())
+                classified = tryClassifyAllocateFromEffects(
+                    defDefOp, def,
+                    /*allowValueScoped=*/true, /*allowOpScoped=*/true,
+                    v, defOp, type);
+              if (!classified) classifyFallbackFrom(def);
             }
             breakFromLoop = true;
             return;

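The refactored helper above keeps the conservative guard from the first patch: an op-scoped Allocate is trusted only when the op is not view-like, takes no memory-ref-like operands, and the traced value is one of its memory-ref-like results. Spelled out as a standalone predicate (names are illustrative; the type test matches the one in the diff and assumes the same headers the patch already includes):

  // Illustrative sketch of the guard applied before accepting an
  // op-scoped MemAlloc as a fresh, distinct allocation.
  static bool looksLikeFreshAllocation(mlir::Operation *op,
                                       mlir::Value candidate) {
    auto isMemRefLikeType = [](mlir::Type t) {
      return fir::isa_ref_type(t) || mlir::isa<mlir::BaseMemRefType>(t) ||
             mlir::isa<mlir::LLVM::LLVMPointerType>(t);
    };
    // View-like ops and ops taking memory operands may merely repackage
    // existing storage, so they are never classified as allocations here.
    if (mlir::isa<mlir::ViewLikeOpInterface>(op))
      return false;
    if (llvm::any_of(op->getOperands(), [&](mlir::Value operand) {
          return isMemRefLikeType(operand.getType());
        }))
      return false;
    // Only a memory-ref-like result that is the value being traced counts.
    return llvm::is_contained(op->getResults(), candidate) &&
           isMemRefLikeType(candidate.getType());
  }
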
From d4ffa26657ba26970045ff31c1dec7052c7a953e Mon Sep 17 00:00:00 2001
From: Susan Tan <zujunt at nvidia.com>
Date: Thu, 6 Nov 2025 09:19:58 -0800
Subject: [PATCH 4/5] cleanup

---
 .../lib/Optimizer/Analysis/AliasAnalysis.cpp  | 27 ++++++++++---------
 1 file changed, 14 insertions(+), 13 deletions(-)

diff --git a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
index 94f9ec5892c58..2c43db7f81218 100644
--- a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
+++ b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
@@ -19,23 +19,24 @@
 #include "mlir/IR/BuiltinOps.h"
 #include "mlir/IR/Value.h"
 #include "mlir/Interfaces/SideEffectInterfaces.h"
+#include "mlir/Interfaces/ViewLikeInterface.h"
 #include "llvm/ADT/TypeSwitch.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/Debug.h"
-#include "mlir/Interfaces/ViewLikeInterface.h"
 
 using namespace mlir;
 
 #define DEBUG_TYPE "fir-alias-analysis"
 
 //===----------------------------------------------------------------------===//
-// AliasAnalysis: alias helpers
+// AliasAnalysis: allocation detection based on MemAlloc effect
 //===----------------------------------------------------------------------===//
 
-static bool tryClassifyAllocateFromEffects(mlir::Operation *op,
-    mlir::Value candidate, bool allowValueScoped, bool allowOpScoped,
-    mlir::Value &v, mlir::Operation *&defOp,
-    fir::AliasAnalysis::SourceKind &type) {
+static bool
+tryClassifyAllocateFromEffects(mlir::Operation *op, mlir::Value candidate,
+                               bool allowValueScoped, bool allowOpScoped,
+                               mlir::Value &v, mlir::Operation *&defOp,
+                               fir::AliasAnalysis::SourceKind &type) {
   auto iface = llvm::dyn_cast<mlir::MemoryEffectOpInterface>(op);
   if (!iface)
     return false;
@@ -58,8 +59,8 @@ static bool tryClassifyAllocateFromEffects(mlir::Operation *op,
   if (!allowOpScoped)
     return false;
 
-  bool hasOpScopedAlloc = llvm::any_of(
-      effects, [](const mlir::MemoryEffects::EffectInstance &e) {
+  bool hasOpScopedAlloc =
+      llvm::any_of(effects, [](const mlir::MemoryEffects::EffectInstance &e) {
         return !e.getValue() &&
                mlir::isa<mlir::MemoryEffects::Allocate>(e.getEffect());
       });
@@ -618,8 +619,7 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
     // Effect-based detection (op-scoped heuristic only at this level).
     if (tryClassifyAllocateFromEffects(defOp, v,
                                        /*allowValueScoped=*/false,
-                                       /*allowOpScoped=*/true,
-                                       v, defOp, type))
+                                       /*allowOpScoped=*/true, v, defOp, type))
       break;
 
     llvm::TypeSwitch<Operation *>(defOp)
@@ -711,9 +711,10 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
               if (auto defDefOp = def.getDefiningOp())
                 classified = tryClassifyAllocateFromEffects(
                     defDefOp, def,
-                    /*allowValueScoped=*/true, /*allowOpScoped=*/true,
-                    v, defOp, type);
-              if (!classified) classifyFallbackFrom(def);
+                    /*allowValueScoped=*/true, /*allowOpScoped=*/true, v, defOp,
+                    type);
+              if (!classified)
+                classifyFallbackFrom(def);
             }
             breakFromLoop = true;
             return;

From 163cda55aa4fc0fe574f73253b2b8d74b435c5b8 Mon Sep 17 00:00:00 2001
From: Susan Tan <zujunt at nvidia.com>
Date: Thu, 6 Nov 2025 09:27:31 -0800
Subject: [PATCH 5/5] rm unused function

---
 flang/lib/Optimizer/Analysis/AliasAnalysis.cpp | 12 +++---------
 1 file changed, 3 insertions(+), 9 deletions(-)

diff --git a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
index 2c43db7f81218..b9cc5a4c8b261 100644
--- a/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
+++ b/flang/lib/Optimizer/Analysis/AliasAnalysis.cpp
@@ -69,18 +69,18 @@ tryClassifyAllocateFromEffects(mlir::Operation *op, mlir::Value candidate,
 
   bool opIsViewLike =
       (bool)mlir::dyn_cast_or_null<mlir::ViewLikeOpInterface>(op);
-  auto isMemoryRefLikeType = [](mlir::Type type) {
+  auto isMemRefLikeType = [](mlir::Type type) {
     return fir::isa_ref_type(type) || mlir::isa<mlir::BaseMemRefType>(type) ||
            mlir::isa<mlir::LLVM::LLVMPointerType>(type);
   };
   bool hasMemOperands = llvm::any_of(op->getOperands(), [&](mlir::Value o) {
-    return isMemoryRefLikeType(o.getType());
+    return isMemRefLikeType(o.getType());
   });
   if (opIsViewLike || hasMemOperands)
     return false;
 
   for (mlir::Value res : op->getResults()) {
-    if (res == candidate && isMemoryRefLikeType(res.getType())) {
+    if (res == candidate && isMemRefLikeType(res.getType())) {
       v = candidate;
       defOp = op;
       type = fir::AliasAnalysis::SourceKind::Allocate;
@@ -607,12 +607,6 @@ AliasAnalysis::Source AliasAnalysis::getSource(mlir::Value v,
     }
   };
 
-  // Helper to detect memory-ref-like types.
-  auto isMemoryRefLikeType = [](mlir::Type t) {
-    return fir::isa_ref_type(t) || mlir::isa<mlir::BaseMemRefType>(t) ||
-           mlir::isa<mlir::LLVM::LLVMPointerType>(t);
-  };
-
   while (defOp && !breakFromLoop) {
     ty = defOp->getResultTypes()[0];
 


