[llvm] 57a5548 - [SROA] Don't shrink volatile load past end

Nikita Popov via llvm-commits llvm-commits at lists.llvm.org
Wed Sep 20 05:12:39 PDT 2023


Author: Nikita Popov
Date: 2023-09-20T14:12:31+02:00
New Revision: 57a554800b804a0d849858972822142e1f75d37f

URL: https://github.com/llvm/llvm-project/commit/57a554800b804a0d849858972822142e1f75d37f
DIFF: https://github.com/llvm/llvm-project/commit/57a554800b804a0d849858972822142e1f75d37f.diff

LOG: [SROA] Don't shrink volatile load past end

For volatile atomic accesses, this may result in verifier errors, if the
new alloca type is not legal for atomic accesses.

I've opted to disable this special case for volatile accesses in
general, as changing the size of the volatile access seems
dubious in any case.

Fixes https://github.com/llvm/llvm-project/issues/64721.

Added: 
    

Modified: 
    llvm/lib/Transforms/Scalar/SROA.cpp
    llvm/test/Transforms/SROA/basictest.ll

Removed: 
    


################################################################################
diff  --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 891f5349bd9b2b0..9612799e48337cc 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -2713,7 +2713,7 @@ class llvm::sroa::AllocaSliceRewriter
                NewEndOffset == NewAllocaEndOffset &&
                (canConvertValue(DL, NewAllocaTy, TargetTy) ||
                 (IsLoadPastEnd && NewAllocaTy->isIntegerTy() &&
-                 TargetTy->isIntegerTy()))) {
+                 TargetTy->isIntegerTy() && !LI.isVolatile()))) {
       Value *NewPtr =
           getPtrToNewAI(LI.getPointerAddressSpace(), LI.isVolatile());
       LoadInst *NewLI = IRB.CreateAlignedLoad(NewAI.getAllocatedType(), NewPtr,

diff  --git a/llvm/test/Transforms/SROA/basictest.ll b/llvm/test/Transforms/SROA/basictest.ll
index de7e11d1e491c66..145da5259fab36a 100644
--- a/llvm/test/Transforms/SROA/basictest.ll
+++ b/llvm/test/Transforms/SROA/basictest.ll
@@ -1968,6 +1968,38 @@ bb7:
   ret void
 }
 
+define i32 @load_atomic_volatile_past_end() {
+; CHECK-LABEL: @load_atomic_volatile_past_end(
+; CHECK-NEXT:    [[A:%.*]] = alloca i1, align 1
+; CHECK-NEXT:    [[A_0_V:%.*]] = load atomic volatile i32, ptr [[A]] seq_cst, align 1
+; CHECK-NEXT:    ret i32 [[A_0_V]]
+;
+  %a = alloca i1, align 1
+  %v = load atomic volatile i32, ptr %a seq_cst, align 4
+  ret i32 %v
+}
+
+define i32 @load_volatile_past_end() {
+; CHECK-LABEL: @load_volatile_past_end(
+; CHECK-NEXT:    [[A:%.*]] = alloca i1, align 1
+; CHECK-NEXT:    [[A_0_V:%.*]] = load volatile i32, ptr [[A]], align 1
+; CHECK-NEXT:    ret i32 [[A_0_V]]
+;
+  %a = alloca i1, align 1
+  %v = load volatile i32, ptr %a, align 4
+  ret i32 %v
+}
+
+define i32 @load_atomic_past_end() {
+; CHECK-LABEL: @load_atomic_past_end(
+; CHECK-NEXT:    [[A_0_LOAD_EXT:%.*]] = zext i1 undef to i32
+; CHECK-NEXT:    ret i32 [[A_0_LOAD_EXT]]
+;
+  %a = alloca i1, align 1
+  %v = load atomic i32, ptr %a seq_cst, align 4
+  ret i32 %v
+}
+
 !0 = !{!1, !1, i64 0, i64 200}
 !1 = !{!2, i64 1, !"type_0"}
 !2 = !{!"root"}


        


More information about the llvm-commits mailing list