[llvm] [SROA] Prevent load atomic vector from being generated (PR #112432)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Oct 21 03:49:43 PDT 2024
https://github.com/jofrn updated https://github.com/llvm/llvm-project/pull/112432
>From cb05b5699e5a65093aaefe26a8128a400c459a9c Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Tue, 15 Oct 2024 16:06:27 -0400
Subject: [PATCH 01/10] [SROA] Prevent load atomic vector from being generated
Atomic loads of vector types are illegal, and they can be formed by SROA via indirect
volatile loads in the AllocaSliceRewriter.
---
llvm/lib/Transforms/Scalar/SROA.cpp | 5 +++++
llvm/test/Transforms/SROA/atomic-vector.ll | 19 +++++++++++++++++++
2 files changed, 24 insertions(+)
create mode 100644 llvm/test/Transforms/SROA/atomic-vector.ll
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 92589ab17da313..450ecdf20ef009 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2853,6 +2853,11 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
bool visitLoadInst(LoadInst &LI) {
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
+
+ // load atomic vector would be generated, which is illegal
+ if (LI.isAtomic() && NewAI.getAllocatedType()->isVectorTy())
+ return false;
+
Value *OldOp = LI.getOperand(0);
assert(OldOp == OldPtr);
diff --git a/llvm/test/Transforms/SROA/atomic-vector.ll b/llvm/test/Transforms/SROA/atomic-vector.ll
new file mode 100644
index 00000000000000..d43ae653fba1dd
--- /dev/null
+++ b/llvm/test/Transforms/SROA/atomic-vector.ll
@@ -0,0 +1,19 @@
+; RUN: opt < %s -passes='sroa' -S 2>&1 | FileCheck %s --check-prefix=ERR
+; RUN: opt < %s -passes='sroa' -S | FileCheck %s
+
+define float @atomic_vector() {
+; ERR-NOT: atomic load operand must have integer, pointer, or floating point type!
+; ERR-NOT: <1 x float> {{%.*}} = load atomic volatile <1 x float>, ptr {{%.*}} acquire, align 4
+; CHECK: %1 = alloca <1 x float>, align 4
+; CHECK-NEXT: store <1 x float> undef, ptr %1, align 4
+; CHECK-NEXT: %2 = load atomic volatile float, ptr %1 acquire, align 4
+; CHECK-NEXT: ret float %2
+ %1 = alloca <1 x float>
+ %2 = alloca <1 x float>
+ %3 = alloca ptr
+ call void @llvm.memcpy.p0.p0.i64(ptr %2, ptr %1, i64 4, i1 false)
+ store ptr %2, ptr %3
+ %4 = load ptr, ptr %3
+ %5 = load atomic volatile float, ptr %4 acquire, align 4
+ ret float %5
+}
>From 0c6150556041dc4ede3039a0f00c7395d7c0898c Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Tue, 15 Oct 2024 17:16:48 -0400
Subject: [PATCH 02/10] Autogenerate assertions
---
llvm/test/Transforms/SROA/atomic-vector.ll | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/llvm/test/Transforms/SROA/atomic-vector.ll b/llvm/test/Transforms/SROA/atomic-vector.ll
index d43ae653fba1dd..c81dc25b0ad52a 100644
--- a/llvm/test/Transforms/SROA/atomic-vector.ll
+++ b/llvm/test/Transforms/SROA/atomic-vector.ll
@@ -1,13 +1,13 @@
-; RUN: opt < %s -passes='sroa' -S 2>&1 | FileCheck %s --check-prefix=ERR
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt < %s -passes='sroa' -S | FileCheck %s
define float @atomic_vector() {
-; ERR-NOT: atomic load operand must have integer, pointer, or floating point type!
-; ERR-NOT: <1 x float> {{%.*}} = load atomic volatile <1 x float>, ptr {{%.*}} acquire, align 4
-; CHECK: %1 = alloca <1 x float>, align 4
-; CHECK-NEXT: store <1 x float> undef, ptr %1, align 4
-; CHECK-NEXT: %2 = load atomic volatile float, ptr %1 acquire, align 4
-; CHECK-NEXT: ret float %2
+; CHECK-LABEL: define float @atomic_vector() {
+; CHECK-NEXT: [[TMP1:%.*]] = alloca <1 x float>, align 4
+; CHECK-NEXT: store <1 x float> undef, ptr [[TMP1]], align 4
+; CHECK-NEXT: [[TMP2:%.*]] = load atomic volatile float, ptr [[TMP1]] acquire, align 4
+; CHECK-NEXT: ret float [[TMP2]]
+;
%1 = alloca <1 x float>
%2 = alloca <1 x float>
%3 = alloca ptr
>From 083346d194bd64f5152a109c6b07e343cd3cf166 Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Wed, 16 Oct 2024 09:02:16 -0400
Subject: [PATCH 03/10] Add i32,ptr tests and rename variables
---
llvm/test/Transforms/SROA/atomic-vector.ll | 51 ++++++++++++++++++----
1 file changed, 43 insertions(+), 8 deletions(-)
diff --git a/llvm/test/Transforms/SROA/atomic-vector.ll b/llvm/test/Transforms/SROA/atomic-vector.ll
index c81dc25b0ad52a..1b6cc275681a80 100644
--- a/llvm/test/Transforms/SROA/atomic-vector.ll
+++ b/llvm/test/Transforms/SROA/atomic-vector.ll
@@ -8,12 +8,47 @@ define float @atomic_vector() {
; CHECK-NEXT: [[TMP2:%.*]] = load atomic volatile float, ptr [[TMP1]] acquire, align 4
; CHECK-NEXT: ret float [[TMP2]]
;
- %1 = alloca <1 x float>
- %2 = alloca <1 x float>
- %3 = alloca ptr
- call void @llvm.memcpy.p0.p0.i64(ptr %2, ptr %1, i64 4, i1 false)
- store ptr %2, ptr %3
- %4 = load ptr, ptr %3
- %5 = load atomic volatile float, ptr %4 acquire, align 4
- ret float %5
+ %src = alloca <1 x float>
+ %val = alloca <1 x float>
+ %direct = alloca ptr
+ call void @llvm.memcpy.p0.p0.i64(ptr %val, ptr %src, i64 4, i1 false)
+ store ptr %val, ptr %direct
+ %indirect = load ptr, ptr %direct
+ %ret = load atomic volatile float, ptr %indirect acquire, align 4
+ ret float %ret
+}
+
+define i32 @atomic_vector_int() {
+; CHECK-LABEL: define i32 @atomic_vector_int() {
+; CHECK-NEXT: [[VAL:%.*]] = alloca <1 x i32>, align 4
+; CHECK-NEXT: store <1 x i32> undef, ptr [[VAL]], align 4
+; CHECK-NEXT: [[RET:%.*]] = load atomic volatile i32, ptr [[VAL]] acquire, align 4
+; CHECK-NEXT: ret i32 [[RET]]
+;
+ %src = alloca <1 x i32>
+ %val = alloca <1 x i32>
+ %direct = alloca ptr
+ call void @llvm.memcpy.p0.p0.i64(ptr %val, ptr %src, i64 4, i1 false)
+ store ptr %val, ptr %direct
+ %indirect = load ptr, ptr %direct
+ %ret = load atomic volatile i32, ptr %indirect acquire, align 4
+ ret i32 %ret
+}
+
+define ptr @atomic_vector_ptr() {
+; CHECK-LABEL: define ptr @atomic_vector_ptr() {
+; CHECK-NEXT: [[SRC_SROA_0:%.*]] = alloca [4 x i8], align 8
+; CHECK-NEXT: [[VAL_SROA_0:%.*]] = alloca ptr, align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[VAL_SROA_0]], ptr align 8 [[SRC_SROA_0]], i64 4, i1 false)
+; CHECK-NEXT: [[VAL_SROA_0_0_VAL_SROA_0_0_RET:%.*]] = load atomic volatile ptr, ptr [[VAL_SROA_0]] acquire, align 4
+; CHECK-NEXT: ret ptr [[VAL_SROA_0_0_VAL_SROA_0_0_RET]]
+;
+ %src = alloca <1 x ptr>
+ %val = alloca <1 x ptr>
+ %direct = alloca ptr
+ call void @llvm.memcpy.p0.p0.i64(ptr %val, ptr %src, i64 4, i1 false)
+ store ptr %val, ptr %direct
+ %indirect = load ptr, ptr %direct
+ %ret = load atomic volatile ptr, ptr %indirect acquire, align 4
+ ret ptr %ret
}
>From cf475055717cfca041ea204f9ec4053a9ed5afaf Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Thu, 17 Oct 2024 00:06:05 -0400
Subject: [PATCH 04/10] memcpy size must be >=8 to generate load atomic <1 x
ptr>
---
llvm/test/Transforms/SROA/atomic-vector.ll | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/llvm/test/Transforms/SROA/atomic-vector.ll b/llvm/test/Transforms/SROA/atomic-vector.ll
index 1b6cc275681a80..6258475aaff841 100644
--- a/llvm/test/Transforms/SROA/atomic-vector.ll
+++ b/llvm/test/Transforms/SROA/atomic-vector.ll
@@ -37,16 +37,15 @@ define i32 @atomic_vector_int() {
define ptr @atomic_vector_ptr() {
; CHECK-LABEL: define ptr @atomic_vector_ptr() {
-; CHECK-NEXT: [[SRC_SROA_0:%.*]] = alloca [4 x i8], align 8
-; CHECK-NEXT: [[VAL_SROA_0:%.*]] = alloca ptr, align 8
-; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 8 [[VAL_SROA_0]], ptr align 8 [[SRC_SROA_0]], i64 4, i1 false)
+; CHECK-NEXT: [[VAL_SROA_0:%.*]] = alloca <1 x ptr>, align 8
+; CHECK-NEXT: store <1 x ptr> undef, ptr [[VAL_SROA_0]], align 8
; CHECK-NEXT: [[VAL_SROA_0_0_VAL_SROA_0_0_RET:%.*]] = load atomic volatile ptr, ptr [[VAL_SROA_0]] acquire, align 4
; CHECK-NEXT: ret ptr [[VAL_SROA_0_0_VAL_SROA_0_0_RET]]
;
%src = alloca <1 x ptr>
%val = alloca <1 x ptr>
%direct = alloca ptr
- call void @llvm.memcpy.p0.p0.i64(ptr %val, ptr %src, i64 4, i1 false)
+ call void @llvm.memcpy.p0.p0.i64(ptr %val, ptr %src, i64 8, i1 false)
store ptr %val, ptr %direct
%indirect = load ptr, ptr %direct
%ret = load atomic volatile ptr, ptr %indirect acquire, align 4
>From ef4fe92fb0b07a93c1db1bf5ff1b481080ad56b2 Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Thu, 17 Oct 2024 23:25:57 -0400
Subject: [PATCH 05/10] Use isValidAtomicTy
---
llvm/include/llvm/IR/Instructions.h | 4 ++++
llvm/lib/IR/Instructions.cpp | 7 +++++++
llvm/lib/Transforms/Scalar/SROA.cpp | 4 +++-
3 files changed, 14 insertions(+), 1 deletion(-)
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 88c8c709c306d9..9cd3212680e869 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -250,6 +250,10 @@ class LoadInst : public UnaryInstruction {
!isVolatile();
}
+ /// Returns false if this type would be invalid in the
+ /// creation of a load atomic instruction.
+ static bool isValidAtomicTy(Type *Ty, const DataLayout &DL);
+
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 009e0c03957c97..d3a5238cd36864 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1247,6 +1247,13 @@ void LoadInst::AssertOK() {
"Ptr must have pointer type.");
}
+bool LoadInst::isValidAtomicTy(Type *Ty, const DataLayout &DL) {
+ if (!Ty->isIntOrPtrTy() && !Ty->isFloatingPointTy())
+ return false;
+ unsigned Size = DL.getTypeSizeInBits(Ty);
+ return Size >= 8 && !(Size & (Size - 1));
+}
+
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
assert(Pos.isValid() &&
"Insertion position cannot be null when alignment not provided!");
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 450ecdf20ef009..5e0eda71d5fefe 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2855,7 +2855,8 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
// load atomic vector would be generated, which is illegal
- if (LI.isAtomic() && NewAI.getAllocatedType()->isVectorTy())
+ if (LI.isAtomic() &&
+ !LoadInst::isValidAtomicTy(NewAI.getAllocatedType(), DL))
return false;
Value *OldOp = LI.getOperand(0);
@@ -2880,6 +2881,7 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
(canConvertValue(DL, NewAllocaTy, TargetTy) ||
(IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
TargetTy->isIntegerTy() && !LI.isVolatile()))) {
+
Value *NewPtr =
getPtrToNewAI(LI.getPointerAddressSpace(), LI.isVolatile());
LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), NewPtr,
>From bdebaa8ecc7591900dc1ee771d197aadd949e06b Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Thu, 17 Oct 2024 23:48:51 -0400
Subject: [PATCH 06/10] Add AtomicOrdering to isValidAtomicTy interface
---
llvm/include/llvm/IR/Instructions.h | 3 ++-
llvm/lib/IR/Instructions.cpp | 5 ++++-
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 9cd3212680e869..cb5c3c25ddd1af 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -252,7 +252,8 @@ class LoadInst : public UnaryInstruction {
/// Returns false if this type would be invalid in the
/// creation of a load atomic instruction.
- static bool isValidAtomicTy(Type *Ty, const DataLayout &DL);
+ static bool isValidAtomicTy(Type *Ty, const DataLayout &DL,
+ AtomicOrdering AO = AtomicOrdering::NotAtomic);
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index d3a5238cd36864..f101d01a9579b9 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1247,9 +1247,12 @@ void LoadInst::AssertOK() {
"Ptr must have pointer type.");
}
-bool LoadInst::isValidAtomicTy(Type *Ty, const DataLayout &DL) {
+bool LoadInst::isValidAtomicTy(Type *Ty, const DataLayout &DL,
+ AtomicOrdering AO) {
if (!Ty->isIntOrPtrTy() && !Ty->isFloatingPointTy())
return false;
+ if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease)
+ return false;
unsigned Size = DL.getTypeSizeInBits(Ty);
return Size >= 8 && !(Size & (Size - 1));
}
>From d68d923c7bf692407c5795c54f12c7966353c99a Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Fri, 18 Oct 2024 02:37:26 -0400
Subject: [PATCH 07/10] Updated basictest.ll for checking getTypeSizeInBits
---
llvm/test/Transforms/SROA/basictest.ll | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index 145da5259fab36..916bd54b8d3da2 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -1332,10 +1332,10 @@ define void @PR15674(ptr %data, ptr %src, i32 %size) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: switch i32 [[SIZE:%.*]], label [[END:%.*]] [
-; CHECK-NEXT: i32 4, label [[BB4:%.*]]
-; CHECK-NEXT: i32 3, label [[BB3:%.*]]
-; CHECK-NEXT: i32 2, label [[BB2:%.*]]
-; CHECK-NEXT: i32 1, label [[BB1:%.*]]
+; CHECK-NEXT: i32 4, label [[BB4:%.*]]
+; CHECK-NEXT: i32 3, label [[BB3:%.*]]
+; CHECK-NEXT: i32 2, label [[BB2:%.*]]
+; CHECK-NEXT: i32 1, label [[BB1:%.*]]
; CHECK-NEXT: ]
; CHECK: bb4:
; CHECK-NEXT: [[SRC_GEP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i32 3
@@ -1971,7 +1971,7 @@ bb7:
define i32 @load_atomic_volatile_past_end() {
; CHECK-LABEL: @load_atomic_volatile_past_end(
; CHECK-NEXT: [[A:%.*]] = alloca i1, align 1
-; CHECK-NEXT: [[A_0_V:%.*]] = load atomic volatile i32, ptr [[A]] seq_cst, align 1
+; CHECK-NEXT: [[A_0_V:%.*]] = load atomic volatile i32, ptr [[A]] seq_cst, align 4
; CHECK-NEXT: ret i32 [[A_0_V]]
;
%a = alloca i1, align 1
@@ -1992,8 +1992,9 @@ define i32 @load_volatile_past_end() {
define i32 @load_atomic_past_end() {
; CHECK-LABEL: @load_atomic_past_end(
-; CHECK-NEXT: [[A_0_LOAD_EXT:%.*]] = zext i1 undef to i32
-; CHECK-NEXT: ret i32 [[A_0_LOAD_EXT]]
+; CHECK-NEXT: [[A:%.*]] = alloca i1, align 1
+; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[A]] seq_cst, align 4
+; CHECK-NEXT: ret i32 [[V]]
;
%a = alloca i1, align 1
%v = load atomic i32, ptr %a seq_cst, align 4
>From 1c1aae7abc53fa4506c98e92573008aa1a6740f9 Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Fri, 18 Oct 2024 11:35:08 -0400
Subject: [PATCH 08/10] Revert basictest.ll changes and DL check
---
llvm/include/llvm/IR/Instructions.h | 3 +--
llvm/lib/IR/Instructions.cpp | 8 ++------
llvm/lib/Transforms/Scalar/SROA.cpp | 3 +--
llvm/test/Transforms/SROA/basictest.ll | 15 +++++++--------
4 files changed, 11 insertions(+), 18 deletions(-)
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index cb5c3c25ddd1af..8b3fc0b229c995 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -252,8 +252,7 @@ class LoadInst : public UnaryInstruction {
/// Returns false if this type would be invalid in the
/// creation of a load atomic instruction.
- static bool isValidAtomicTy(Type *Ty, const DataLayout &DL,
- AtomicOrdering AO = AtomicOrdering::NotAtomic);
+ static bool isValidAtomicTy(Type *Ty);
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index f101d01a9579b9..cc82f3ef1cc5d0 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1247,14 +1247,10 @@ void LoadInst::AssertOK() {
"Ptr must have pointer type.");
}
-bool LoadInst::isValidAtomicTy(Type *Ty, const DataLayout &DL,
- AtomicOrdering AO) {
+bool LoadInst::isValidAtomicTy(Type *Ty) {
if (!Ty->isIntOrPtrTy() && !Ty->isFloatingPointTy())
return false;
- if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease)
- return false;
- unsigned Size = DL.getTypeSizeInBits(Ty);
- return Size >= 8 && !(Size & (Size - 1));
+ return true;
}
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 5e0eda71d5fefe..c49d8ca9553afc 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2855,8 +2855,7 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
// load atomic vector would be generated, which is illegal
- if (LI.isAtomic() &&
- !LoadInst::isValidAtomicTy(NewAI.getAllocatedType(), DL))
+ if (LI.isAtomic() && !LoadInst::isValidAtomicTy(NewAI.getAllocatedType()))
return false;
Value *OldOp = LI.getOperand(0);
diff --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index 916bd54b8d3da2..145da5259fab36 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -1332,10 +1332,10 @@ define void @PR15674(ptr %data, ptr %src, i32 %size) {
; CHECK-NEXT: entry:
; CHECK-NEXT: [[TMP_SROA_0:%.*]] = alloca i32, align 4
; CHECK-NEXT: switch i32 [[SIZE:%.*]], label [[END:%.*]] [
-; CHECK-NEXT: i32 4, label [[BB4:%.*]]
-; CHECK-NEXT: i32 3, label [[BB3:%.*]]
-; CHECK-NEXT: i32 2, label [[BB2:%.*]]
-; CHECK-NEXT: i32 1, label [[BB1:%.*]]
+; CHECK-NEXT: i32 4, label [[BB4:%.*]]
+; CHECK-NEXT: i32 3, label [[BB3:%.*]]
+; CHECK-NEXT: i32 2, label [[BB2:%.*]]
+; CHECK-NEXT: i32 1, label [[BB1:%.*]]
; CHECK-NEXT: ]
; CHECK: bb4:
; CHECK-NEXT: [[SRC_GEP3:%.*]] = getelementptr inbounds i8, ptr [[SRC:%.*]], i32 3
@@ -1971,7 +1971,7 @@ bb7:
define i32 @load_atomic_volatile_past_end() {
; CHECK-LABEL: @load_atomic_volatile_past_end(
; CHECK-NEXT: [[A:%.*]] = alloca i1, align 1
-; CHECK-NEXT: [[A_0_V:%.*]] = load atomic volatile i32, ptr [[A]] seq_cst, align 4
+; CHECK-NEXT: [[A_0_V:%.*]] = load atomic volatile i32, ptr [[A]] seq_cst, align 1
; CHECK-NEXT: ret i32 [[A_0_V]]
;
%a = alloca i1, align 1
@@ -1992,9 +1992,8 @@ define i32 @load_volatile_past_end() {
define i32 @load_atomic_past_end() {
; CHECK-LABEL: @load_atomic_past_end(
-; CHECK-NEXT: [[A:%.*]] = alloca i1, align 1
-; CHECK-NEXT: [[V:%.*]] = load atomic i32, ptr [[A]] seq_cst, align 4
-; CHECK-NEXT: ret i32 [[V]]
+; CHECK-NEXT: [[A_0_LOAD_EXT:%.*]] = zext i1 undef to i32
+; CHECK-NEXT: ret i32 [[A_0_LOAD_EXT]]
;
%a = alloca i1, align 1
%v = load atomic i32, ptr %a seq_cst, align 4
>From 67e0560c8ba1db8144612dcc0a4499fa92cba674 Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Sun, 20 Oct 2024 14:04:25 -0400
Subject: [PATCH 09/10] Add <2 x i32> test. Add DL as optional to
isValidAtomicTy
---
llvm/include/llvm/IR/Instructions.h | 3 ++-
llvm/lib/IR/Instructions.cpp | 10 +++++++++-
llvm/lib/Transforms/Scalar/SROA.cpp | 3 ++-
llvm/test/Transforms/SROA/atomic-vector.ll | 17 +++++++++++++++++
4 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 8b3fc0b229c995..8570657387cd20 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -252,7 +252,8 @@ class LoadInst : public UnaryInstruction {
/// Returns false if this type would be invalid in the
/// creation of a load atomic instruction.
- static bool isValidAtomicTy(Type *Ty);
+ static bool isValidAtomicTy(Type *Ty, const DataLayout *DL = nullptr,
+ AtomicOrdering AO = AtomicOrdering::NotAtomic);
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index cc82f3ef1cc5d0..0526217e3b7d70 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1247,9 +1247,17 @@ void LoadInst::AssertOK() {
"Ptr must have pointer type.");
}
-bool LoadInst::isValidAtomicTy(Type *Ty) {
+bool LoadInst::isValidAtomicTy(Type *Ty, const DataLayout *DL,
+ AtomicOrdering AO) {
+ // TODO: Share methods with IR/Verifier.
if (!Ty->isIntOrPtrTy() && !Ty->isFloatingPointTy())
return false;
+ if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease)
+ return false;
+ if (DL) {
+ unsigned Size = DL->getTypeSizeInBits(Ty);
+ return Size >= 8 && !(Size & (Size - 1));
+ }
return true;
}
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index c49d8ca9553afc..3756c78c13cd8f 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2854,7 +2854,8 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
bool visitLoadInst(LoadInst &LI) {
LLVM_DEBUG(dbgs() << " original: " << LI << "\n");
- // load atomic vector would be generated, which is illegal
+ // Load atomic vector would be generated, which is illegal.
+ // TODO: Generate a generic bitcast in machine codegen instead.
if (LI.isAtomic() && !LoadInst::isValidAtomicTy(NewAI.getAllocatedType()))
return false;
diff --git a/llvm/test/Transforms/SROA/atomic-vector.ll b/llvm/test/Transforms/SROA/atomic-vector.ll
index 6258475aaff841..3063b6bf0019fd 100644
--- a/llvm/test/Transforms/SROA/atomic-vector.ll
+++ b/llvm/test/Transforms/SROA/atomic-vector.ll
@@ -51,3 +51,20 @@ define ptr @atomic_vector_ptr() {
%ret = load atomic volatile ptr, ptr %indirect acquire, align 4
ret ptr %ret
}
+
+define i32 @atomic_2vector_int() {
+; CHECK-LABEL: define i32 @atomic_2vector_int() {
+; CHECK-NEXT: [[VAL_SROA_0:%.*]] = alloca i32, align 8
+; CHECK-NEXT: store i32 undef, ptr [[VAL_SROA_0]], align 8
+; CHECK-NEXT: [[VAL_SROA_0_0_VAL_SROA_0_0_RET:%.*]] = load atomic volatile i32, ptr [[VAL_SROA_0]] acquire, align 4
+; CHECK-NEXT: ret i32 [[VAL_SROA_0_0_VAL_SROA_0_0_RET]]
+;
+ %src = alloca <2 x i32>
+ %val = alloca <2 x i32>
+ %direct = alloca ptr
+ call void @llvm.memcpy.p0.p0.i64(ptr %val, ptr %src, i64 4, i1 false)
+ store ptr %val, ptr %direct
+ %indirect = load ptr, ptr %direct
+ %ret = load atomic volatile i32, ptr %indirect acquire, align 4
+ ret i32 %ret
+}
>From be9eeb792fa1cdd8e3f4cc9f95a874ce215afd62 Mon Sep 17 00:00:00 2001
From: jofernau <Joe.Fernau at amd.com>
Date: Mon, 21 Oct 2024 06:49:03 -0400
Subject: [PATCH 10/10] Added non-byte-sized element test. Removed the
 isValidAtomicTy interface.
---
llvm/include/llvm/IR/Instructions.h | 5 -----
llvm/lib/IR/Instructions.cpp | 14 --------------
llvm/lib/Transforms/Scalar/SROA.cpp | 2 +-
llvm/test/Transforms/SROA/atomic-vector.ll | 20 ++++++++++++++++++++
4 files changed, 21 insertions(+), 20 deletions(-)
diff --git a/llvm/include/llvm/IR/Instructions.h b/llvm/include/llvm/IR/Instructions.h
index 8570657387cd20..88c8c709c306d9 100644
--- a/llvm/include/llvm/IR/Instructions.h
+++ b/llvm/include/llvm/IR/Instructions.h
@@ -250,11 +250,6 @@ class LoadInst : public UnaryInstruction {
!isVolatile();
}
- /// Returns false if this type would be invalid in the
- /// creation of a load atomic instruction.
- static bool isValidAtomicTy(Type *Ty, const DataLayout *DL = nullptr,
- AtomicOrdering AO = AtomicOrdering::NotAtomic);
-
Value *getPointerOperand() { return getOperand(0); }
const Value *getPointerOperand() const { return getOperand(0); }
static unsigned getPointerOperandIndex() { return 0U; }
diff --git a/llvm/lib/IR/Instructions.cpp b/llvm/lib/IR/Instructions.cpp
index 0526217e3b7d70..009e0c03957c97 100644
--- a/llvm/lib/IR/Instructions.cpp
+++ b/llvm/lib/IR/Instructions.cpp
@@ -1247,20 +1247,6 @@ void LoadInst::AssertOK() {
"Ptr must have pointer type.");
}
-bool LoadInst::isValidAtomicTy(Type *Ty, const DataLayout *DL,
- AtomicOrdering AO) {
- // TODO: Share methods with IR/Verifier.
- if (!Ty->isIntOrPtrTy() && !Ty->isFloatingPointTy())
- return false;
- if (AO == AtomicOrdering::Release || AO == AtomicOrdering::AcquireRelease)
- return false;
- if (DL) {
- unsigned Size = DL->getTypeSizeInBits(Ty);
- return Size >= 8 && !(Size & (Size - 1));
- }
- return true;
-}
-
static Align computeLoadStoreDefaultAlign(Type *Ty, InsertPosition Pos) {
assert(Pos.isValid() &&
"Insertion position cannot be null when alignment not provided!");
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 3756c78c13cd8f..fb090e038ee00c 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2856,7 +2856,7 @@ class AllocaSliceRewriter : public InstVisitor<AllocaSliceRewriter, bool> {
// Load atomic vector would be generated, which is illegal.
// TODO: Generate a generic bitcast in machine codegen instead.
- if (LI.isAtomic() && !LoadInst::isValidAtomicTy(NewAI.getAllocatedType()))
+    if (LI.isAtomic() && NewAI.getAllocatedType()->isVectorTy())
return false;
Value *OldOp = LI.getOperand(0);
diff --git a/llvm/test/Transforms/SROA/atomic-vector.ll b/llvm/test/Transforms/SROA/atomic-vector.ll
index 3063b6bf0019fd..a836cc6f7cdf22 100644
--- a/llvm/test/Transforms/SROA/atomic-vector.ll
+++ b/llvm/test/Transforms/SROA/atomic-vector.ll
@@ -68,3 +68,23 @@ define i32 @atomic_2vector_int() {
%ret = load atomic volatile i32, ptr %indirect acquire, align 4
ret i32 %ret
}
+
+define i32 @atomic_2vector_nonbyte_illegal_int() {
+; CHECK-LABEL: define i32 @atomic_2vector_nonbyte_illegal_int() {
+; CHECK-NEXT: [[SRC_SROA_1:%.*]] = alloca i17, align 4
+; CHECK-NEXT: [[VAL_SROA_0:%.*]] = alloca i32, align 8
+; CHECK-NEXT: [[VAL_SROA_2:%.*]] = alloca i17, align 4
+; CHECK-NEXT: store i32 undef, ptr [[VAL_SROA_0]], align 8
+; CHECK-NEXT: call void @llvm.memcpy.p0.p0.i64(ptr align 4 [[VAL_SROA_2]], ptr align 4 [[SRC_SROA_1]], i64 4, i1 false)
+; CHECK-NEXT: [[VAL_SROA_0_0_VAL_SROA_0_0_RET:%.*]] = load atomic volatile i32, ptr [[VAL_SROA_0]] acquire, align 4
+; CHECK-NEXT: ret i32 [[VAL_SROA_0_0_VAL_SROA_0_0_RET]]
+;
+ %src = alloca <2 x i17>
+ %val = alloca <2 x i17>
+ %direct = alloca ptr
+ call void @llvm.memcpy.p0.p0.i64(ptr %val, ptr %src, i64 8, i1 false)
+ store ptr %val, ptr %direct
+ %indirect = load ptr, ptr %direct
+ %ret = load atomic volatile i32, ptr %indirect acquire, align 4
+ ret i32 %ret
+}
More information about the llvm-commits
mailing list