[llvm] [SROA] Prevent load atomic vector from being generated (PR #112432)

via llvm-commits llvm-commits at lists.llvm.org
Tue Oct 15 13:15:17 PDT 2024


https://github.com/jofrn created https://github.com/llvm/llvm-project/pull/112432

Atomic loads of vector types are illegal IR, and SROA can form them in the AllocaSliceRewriter when rewriting indirect atomic volatile loads.

>From cb05b5699e5a65093aaefe26a8128a400c459a9c Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Tue, 15 Oct 2024 16:06:27 -0400
Subject: [PATCH] [SROA] Prevent load atomic vector from being generated

Atomic loads of vector types are illegal IR, and SROA can form them in
the AllocaSliceRewriter when rewriting indirect atomic volatile loads.
---
 llvm/lib/Transforms/Scalar/SROA.cpp        |  5 +++++
 llvm/test/Transforms/SROA/atomic-vector.ll | 19 +++++++++++++++++++
 2 files changed, 24 insertions(+)
 create mode 100644 llvm/test/Transforms/SROA/atomic-vector.ll

diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 92589ab17da313..450ecdf20ef009 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2853,6 +2853,11 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
 
   bool visitLoadInst(LoadInst &LI) {
     LLVM_DEBUG(dbgs() << "    original: " << LI << "\n");
+
+    // load atomic vector would be generated, which is illegal
+    if (LI.isAtomic() && NewAI.getAllocatedType()->isVectorTy())
+      return false;
+
     Value *OldOp = LI.getOperand(0);
     assert(OldOp == OldPtr);
 
diff --git a/llvm/test/Transforms/SROA/atomic-vector.ll b/llvm/test/Transforms/SROA/atomic-vector.ll
new file mode 100644
index 00000000000000..d43ae653fba1dd
--- /dev/null
+++ b/llvm/test/Transforms/SROA/atomic-vector.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -passes='sroa' -S 2>&1 | FileCheck %s --check-prefix=ERR
+; RUN: opt < %s -passes='sroa' -S | FileCheck %s
+
+define float @atomic_vector() {
+; ERR-NOT: atomic load operand must have integer, pointer, or floating point type!
+; ERR-NOT:   <1 x float>  {{%.*}} = load atomic volatile <1 x float>, ptr {{%.*}} acquire, align 4
+; CHECK:      %1 = alloca <1 x float>, align 4
+; CHECK-NEXT: store <1 x float> undef, ptr %1, align 4
+; CHECK-NEXT: %2 = load atomic volatile float, ptr %1 acquire, align 4
+; CHECK-NEXT: ret float %2
+  %1 = alloca <1 x float>
+  %2 = alloca <1 x float>
+  %3 = alloca ptr
+  call void @llvm.memcpy.p0.p0.i64(ptr %2, ptr %1, i64 4, i1 false)
+  store ptr %2, ptr %3
+  %4 = load ptr, ptr %3
+  %5 = load atomic volatile float, ptr %4 acquire, align 4
+  ret float %5
+}



More information about the llvm-commits mailing list