[llvm] 96ab74b - [InstCombine] remove undef loads, such as memcpy from undef (#143958)

via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 20 07:32:34 PDT 2025


Author: Jameson Nash
Date: 2025-06-20T10:32:31-04:00
New Revision: 96ab74bf175f46de4b6fbfc68deecd3567e42a52

URL: https://github.com/llvm/llvm-project/commit/96ab74bf175f46de4b6fbfc68deecd3567e42a52
DIFF: https://github.com/llvm/llvm-project/commit/96ab74bf175f46de4b6fbfc68deecd3567e42a52.diff

LOG: [InstCombine] remove undef loads, such as memcpy from undef (#143958)

Extend `isAllocSiteRemovable` to be able to check if the ModRef info
indicates the alloca is only Ref or only Mod, and be able to remove it
accordingly. It seemed that there were a surprising number of
benchmarks with this pattern which weren't getting optimized previously
(due to MemorySSA walk limits). There were somewhat more existing tests
than I'd like to have modified which were simply doing exactly this
pattern (and thus relying on undef memory). Claude code contributed the
new tests (and found an important typo that I'd made).

This implements the discussion in
https://github.com/llvm/llvm-project/pull/143782#discussion_r2142720376.

Added: 
    llvm/test/Transforms/InstCombine/dead-alloc-elim.ll

Modified: 
    clang/test/Misc/loop-opt-setup.c
    llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
    llvm/test/Transforms/InstCombine/and-or-icmps.ll
    llvm/test/Transforms/InstCombine/apint-shift.ll
    llvm/test/Transforms/InstCombine/call-cast-target.ll
    llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
    llvm/test/Transforms/InstCombine/getelementptr.ll
    llvm/test/Transforms/InstCombine/malloc-free.ll
    llvm/test/Transforms/InstCombine/multiple-uses-load-bitcast-select.ll
    llvm/test/Transforms/InstCombine/objsize.ll
    llvm/test/Transforms/InstCombine/select-load.ll
    llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll
    llvm/test/Transforms/InstCombine/vscale_gep.ll

Removed: 
    


################################################################################
diff --git a/clang/test/Misc/loop-opt-setup.c b/clang/test/Misc/loop-opt-setup.c
index 01643e6073b56..c1c620e52200d 100644
--- a/clang/test/Misc/loop-opt-setup.c
+++ b/clang/test/Misc/loop-opt-setup.c
@@ -15,7 +15,7 @@ int foo(void) {
 // CHECK-NOT: br i1
 
 void Helper(void) {
-  const int *nodes[5];
+  const int *nodes[5] = {0};
   int num_active = 5;
 
   while (num_active)

diff --git a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
index afd3359e22ff3..bcc73090277aa 100644
--- a/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstructionCombining.cpp
@@ -3277,12 +3277,13 @@ static bool isRemovableWrite(CallBase &CB, Value *UsedV,
   return Dest && Dest->Ptr == UsedV;
 }
 
-static bool isAllocSiteRemovable(Instruction *AI,
-                                 SmallVectorImpl<WeakTrackingVH> &Users,
-                                 const TargetLibraryInfo &TLI) {
+static std::optional<ModRefInfo>
+isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakTrackingVH> &Users,
+                     const TargetLibraryInfo &TLI, bool KnowInit) {
   SmallVector<Instruction*, 4> Worklist;
   const std::optional<StringRef> Family = getAllocationFamily(AI, &TLI);
   Worklist.push_back(AI);
+  ModRefInfo Access = KnowInit ? ModRefInfo::NoModRef : ModRefInfo::Mod;
 
   do {
     Instruction *PI = Worklist.pop_back_val();
@@ -3291,7 +3292,7 @@ static bool isAllocSiteRemovable(Instruction *AI,
       switch (I->getOpcode()) {
       default:
         // Give up the moment we see something we can't handle.
-        return false;
+        return std::nullopt;
 
       case Instruction::AddrSpaceCast:
       case Instruction::BitCast:
@@ -3306,10 +3307,10 @@ static bool isAllocSiteRemovable(Instruction *AI,
         // We also fold comparisons in some conditions provided the alloc has
         // not escaped (see isNeverEqualToUnescapedAlloc).
         if (!ICI->isEquality())
-          return false;
+          return std::nullopt;
         unsigned OtherIndex = (ICI->getOperand(0) == PI) ? 1 : 0;
         if (!isNeverEqualToUnescapedAlloc(ICI->getOperand(OtherIndex), TLI, AI))
-          return false;
+          return std::nullopt;
 
         // Do not fold compares to aligned_alloc calls, as they may have to
         // return null in case the required alignment cannot be satisfied,
@@ -3329,7 +3330,7 @@ static bool isAllocSiteRemovable(Instruction *AI,
         if (CB && TLI.getLibFunc(*CB->getCalledFunction(), TheLibFunc) &&
             TLI.has(TheLibFunc) && TheLibFunc == LibFunc_aligned_alloc &&
             !AlignmentAndSizeKnownValid(CB))
-          return false;
+          return std::nullopt;
         Users.emplace_back(I);
         continue;
       }
@@ -3339,14 +3340,21 @@ static bool isAllocSiteRemovable(Instruction *AI,
         if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
           switch (II->getIntrinsicID()) {
           default:
-            return false;
+            return std::nullopt;
 
           case Intrinsic::memmove:
           case Intrinsic::memcpy:
           case Intrinsic::memset: {
             MemIntrinsic *MI = cast<MemIntrinsic>(II);
-            if (MI->isVolatile() || MI->getRawDest() != PI)
-              return false;
+            if (MI->isVolatile())
+              return std::nullopt;
+            // Note: this could also be ModRef, but we can still interpret that
+            // as just Mod in that case.
+            ModRefInfo NewAccess =
+                MI->getRawDest() == PI ? ModRefInfo::Mod : ModRefInfo::Ref;
+            if ((Access & ~NewAccess) != ModRefInfo::NoModRef)
+              return std::nullopt;
+            Access |= NewAccess;
             [[fallthrough]];
           }
           case Intrinsic::assume:
@@ -3365,11 +3373,6 @@ static bool isAllocSiteRemovable(Instruction *AI,
           }
         }
 
-        if (isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
-          Users.emplace_back(I);
-          continue;
-        }
-
         if (Family && getFreedOperand(cast<CallBase>(I), &TLI) == PI &&
             getAllocationFamily(I, &TLI) == Family) {
           Users.emplace_back(I);
@@ -3383,12 +3386,33 @@ static bool isAllocSiteRemovable(Instruction *AI,
           continue;
         }
 
-        return false;
+        if (!isRefSet(Access) &&
+            isRemovableWrite(*cast<CallBase>(I), PI, TLI)) {
+          Access |= ModRefInfo::Mod;
+          Users.emplace_back(I);
+          continue;
+        }
+
+        return std::nullopt;
 
       case Instruction::Store: {
         StoreInst *SI = cast<StoreInst>(I);
         if (SI->isVolatile() || SI->getPointerOperand() != PI)
-          return false;
+          return std::nullopt;
+        if (isRefSet(Access))
+          return std::nullopt;
+        Access |= ModRefInfo::Mod;
+        Users.emplace_back(I);
+        continue;
+      }
+
+      case Instruction::Load: {
+        LoadInst *LI = cast<LoadInst>(I);
+        if (LI->isVolatile() || LI->getPointerOperand() != PI)
+          return std::nullopt;
+        if (isModSet(Access))
+          return std::nullopt;
+        Access |= ModRefInfo::Ref;
         Users.emplace_back(I);
         continue;
       }
@@ -3396,7 +3420,9 @@ static bool isAllocSiteRemovable(Instruction *AI,
       llvm_unreachable("missing a return?");
     }
   } while (!Worklist.empty());
-  return true;
+
+  assert(Access != ModRefInfo::ModRef);
+  return Access;
 }
 
 Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
@@ -3424,10 +3450,31 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
     DIB.reset(new DIBuilder(*MI.getModule(), /*AllowUnresolved=*/false));
   }
 
-  if (isAllocSiteRemovable(&MI, Users, TLI)) {
+  // Determine what getInitialValueOfAllocation would return without actually
+  // allocating the result.
+  bool KnowInitUndef = false;
+  bool KnowInitZero = false;
+  Constant *Init =
+      getInitialValueOfAllocation(&MI, &TLI, Type::getInt8Ty(MI.getContext()));
+  if (Init) {
+    if (isa<UndefValue>(Init))
+      KnowInitUndef = true;
+    else if (Init->isNullValue())
+      KnowInitZero = true;
+  }
+  // The various sanitizers don't actually return undef memory, but rather
+  // memory initialized with special forms of runtime poison
+  auto &F = *MI.getFunction();
+  if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
+      F.hasFnAttribute(Attribute::SanitizeAddress))
+    KnowInitUndef = false;
+
+  auto Removable =
+      isAllocSiteRemovable(&MI, Users, TLI, KnowInitZero | KnowInitUndef);
+  if (Removable) {
     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
-      // Lowering all @llvm.objectsize calls first because they may
-      // use a bitcast/GEP of the alloca we are removing.
+      // Lowering all @llvm.objectsize and MTI calls first because they may use
+      // a bitcast/GEP of the alloca we are removing.
       if (!Users[i])
        continue;
 
@@ -3444,6 +3491,17 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
           eraseInstFromFunction(*I);
           Users[i] = nullptr; // Skip examining in the next loop.
         }
+        if (auto *MTI = dyn_cast<MemTransferInst>(I)) {
+          if (KnowInitZero && isRefSet(*Removable)) {
+            IRBuilderBase::InsertPointGuard Guard(Builder);
+            Builder.SetInsertPoint(MTI);
+            auto *M = Builder.CreateMemSet(
+                MTI->getRawDest(),
+                ConstantInt::get(Type::getInt8Ty(MI.getContext()), 0),
+                MTI->getLength(), MTI->getDestAlign());
+            M->copyMetadata(*MTI);
+          }
+        }
       }
     }
     for (unsigned i = 0, e = Users.size(); i != e; ++i) {
@@ -3466,7 +3524,14 @@ Instruction *InstCombinerImpl::visitAllocSite(Instruction &MI) {
       } else {
         // Casts, GEP, or anything else: we're about to delete this instruction,
         // so it can not have any valid uses.
-        replaceInstUsesWith(*I, PoisonValue::get(I->getType()));
+        Constant *Replace;
+        if (isa<LoadInst>(I)) {
+          assert(KnowInitZero || KnowInitUndef);
+          Replace = KnowInitUndef ? UndefValue::get(I->getType())
+                                  : Constant::getNullValue(I->getType());
+        } else
+          Replace = PoisonValue::get(I->getType());
+        replaceInstUsesWith(*I, Replace);
       }
       eraseInstFromFunction(*I);
     }

diff --git a/llvm/test/Transforms/InstCombine/and-or-icmps.ll b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
index 8824ae48417b0..42e5020748129 100644
--- a/llvm/test/Transforms/InstCombine/and-or-icmps.ll
+++ b/llvm/test/Transforms/InstCombine/and-or-icmps.ll
@@ -364,23 +364,77 @@ define <2 x i1> @and_ne_with_diff_one_splatvec(<2 x i32> %x) {
 
 define void @simplify_before_foldAndOfICmps(ptr %p) {
 ; CHECK-LABEL: @simplify_before_foldAndOfICmps(
-; CHECK-NEXT:    [[A8:%.*]] = alloca i16, align 2
-; CHECK-NEXT:    [[L7:%.*]] = load i16, ptr [[A8]], align 2
+; CHECK-NEXT:    store i1 true, ptr [[P:%.*]], align 1
+; CHECK-NEXT:    store ptr null, ptr [[P]], align 8
+; CHECK-NEXT:    ret void
+;
+  %A8 = alloca i16
+  %L7 = load i16, ptr %A8
+  %G21 = getelementptr i16, ptr %A8, i8 -1
+  %B11 = udiv i16 %L7, -1
+  %G4 = getelementptr i16, ptr %A8, i16 %B11
+  %L2 = load i16, ptr %G4
+  %L = load i16, ptr %G4
+  %B23 = mul i16 %B11, %B11
+  %L4 = load i16, ptr %A8
+  %B21 = sdiv i16 %L7, %L4
+  %B7 = sub i16 0, %B21
+  %B18 = mul i16 %B23, %B7
+  %C10 = icmp ugt i16 %L, %B11
+  %B20 = and i16 %L7, %L2
+  %B1 = mul i1 %C10, true
+  %C5 = icmp sle i16 %B21, %L
+  %C11 = icmp ule i16 %B21, %L
+  %C7 = icmp slt i16 %B20, 0
+  %B29 = srem i16 %L4, %B18
+  %B15 = add i1 %C7, %C10
+  %B19 = add i1 %C11, %B15
+  %C6 = icmp sge i1 %C11, %B19
+  %B33 = or i16 %B29, %L4
+  %C13 = icmp uge i1 %C5, %B1
+  %C3 = icmp ult i1 %C13, %C6
+  store i16 undef, ptr %G21
+  %C18 = icmp ule i1 %C10, %C7
+  %G26 = getelementptr i1, ptr null, i1 %C3
+  store i16 %B33, ptr %p
+  store i1 %C18, ptr %p
+  store ptr %G26, ptr %p
+  ret void
+}
+
+define void @simplify_before_foldAndOfICmps2(ptr %p, ptr %A8) "instcombine-no-verify-fixpoint" {
+; CHECK-LABEL: @simplify_before_foldAndOfICmps2(
+; CHECK-NEXT:    [[L7:%.*]] = load i16, ptr [[A8:%.*]], align 2
 ; CHECK-NEXT:    [[TMP1:%.*]] = icmp eq i16 [[L7]], -1
 ; CHECK-NEXT:    [[B11:%.*]] = zext i1 [[TMP1]] to i16
-; CHECK-NEXT:    [[C10:%.*]] = icmp ugt i16 [[L7]], [[B11]]
-; CHECK-NEXT:    [[C7:%.*]] = icmp slt i16 [[L7]], 0
-; CHECK-NEXT:    [[C3:%.*]] = and i1 [[C7]], [[C10]]
-; CHECK-NEXT:    [[TMP2:%.*]] = xor i1 [[C10]], true
-; CHECK-NEXT:    [[C18:%.*]] = or i1 [[C7]], [[TMP2]]
-; CHECK-NEXT:    [[TMP3:%.*]] = sext i1 [[C3]] to i64
+; CHECK-NEXT:    [[TMP2:%.*]] = zext i1 [[TMP1]] to i64
+; CHECK-NEXT:    [[G4:%.*]] = getelementptr i16, ptr [[A8]], i64 [[TMP2]]
+; CHECK-NEXT:    [[L2:%.*]] = load i16, ptr [[G4]], align 2
+; CHECK-NEXT:    [[L4:%.*]] = load i16, ptr [[A8]], align 2
+; CHECK-NEXT:    [[B21:%.*]] = sdiv i16 [[L7]], [[L4]]
+; CHECK-NEXT:    [[TMP5:%.*]] = select i1 [[TMP1]], i16 [[B21]], i16 0
+; CHECK-NEXT:    [[B18:%.*]] = sub i16 0, [[TMP5]]
+; CHECK-NEXT:    [[C11:%.*]] = icmp ugt i16 [[L2]], [[B11]]
+; CHECK-NEXT:    [[B20:%.*]] = and i16 [[L7]], [[L2]]
+; CHECK-NEXT:    [[C5:%.*]] = icmp sgt i16 [[B21]], [[L2]]
+; CHECK-NEXT:    [[C12:%.*]] = icmp ule i16 [[B21]], [[L2]]
+; CHECK-NEXT:    [[C10:%.*]] = icmp slt i16 [[B20]], 0
+; CHECK-NEXT:    [[B29:%.*]] = srem i16 [[L4]], [[B18]]
+; CHECK-NEXT:    [[B15:%.*]] = xor i1 [[C10]], [[C11]]
+; CHECK-NEXT:    [[TMP6:%.*]] = and i1 [[C12]], [[B15]]
+; CHECK-NEXT:    [[C6:%.*]] = xor i1 [[TMP6]], true
+; CHECK-NEXT:    [[B33:%.*]] = or i16 [[B29]], [[L4]]
+; CHECK-NEXT:    [[C3:%.*]] = and i1 [[C5]], [[C6]]
+; CHECK-NEXT:    [[C4:%.*]] = and i1 [[C3]], [[C11]]
+; CHECK-NEXT:    [[TMP4:%.*]] = xor i1 [[C11]], true
+; CHECK-NEXT:    [[C18:%.*]] = or i1 [[C10]], [[TMP4]]
+; CHECK-NEXT:    [[TMP3:%.*]] = sext i1 [[C4]] to i64
 ; CHECK-NEXT:    [[G26:%.*]] = getelementptr i1, ptr null, i64 [[TMP3]]
-; CHECK-NEXT:    store i16 [[L7]], ptr [[P:%.*]], align 2
+; CHECK-NEXT:    store i16 [[B33]], ptr [[P:%.*]], align 2
 ; CHECK-NEXT:    store i1 [[C18]], ptr [[P]], align 1
 ; CHECK-NEXT:    store ptr [[G26]], ptr [[P]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %A8 = alloca i16
   %L7 = load i16, ptr %A8
   %G21 = getelementptr i16, ptr %A8, i8 -1
   %B11 = udiv i16 %L7, -1

diff --git a/llvm/test/Transforms/InstCombine/apint-shift.ll b/llvm/test/Transforms/InstCombine/apint-shift.ll
index 3cc530bdbd021..4dd0811bb7ecb 100644
--- a/llvm/test/Transforms/InstCombine/apint-shift.ll
+++ b/llvm/test/Transforms/InstCombine/apint-shift.ll
@@ -562,11 +562,10 @@ define i40 @test26(i40 %A) {
 
 ; OSS-Fuzz #9880
 ; https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=9880
-define i177 @ossfuzz_9880(i177 %X) {
+define i177 @ossfuzz_9880(i177 %X, ptr %A) {
 ; CHECK-LABEL: @ossfuzz_9880(
 ; CHECK-NEXT:    ret i177 0
 ;
-  %A = alloca i177
   %L1 = load i177, ptr %A
   %B = or i177 0, -1
   %B5 = udiv i177 %L1, %B

diff --git a/llvm/test/Transforms/InstCombine/call-cast-target.ll b/llvm/test/Transforms/InstCombine/call-cast-target.ll
index 2cedc6c81d735..2f4b4ad2409e6 100644
--- a/llvm/test/Transforms/InstCombine/call-cast-target.ll
+++ b/llvm/test/Transforms/InstCombine/call-cast-target.ll
@@ -110,19 +110,17 @@ entry:
 
 declare i1 @fn5(ptr byval({ i32, i32 }) align 4 %r)
 
-define i1 @test5() {
-; CHECK-LABEL: define i1 @test5() {
-; CHECK-NEXT:    [[TMP1:%.*]] = alloca { i32, i32 }, align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[TMP1]], i32 4
+define i1 @test5(ptr %ptr) {
+; CHECK-LABEL: define i1 @test5(ptr %ptr) {
+; CHECK-NEXT:    [[TMP2:%.*]] = load i32, ptr [[PTR:%.*]], align 4
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds nuw i8, ptr [[PTR]], i32 4
 ; CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
 ; CHECK-NEXT:    [[TMP5:%.*]] = call i1 @fn5(i32 [[TMP2]], i32 [[TMP4]])
 ; CHECK-NEXT:    ret i1 [[TMP5]]
 ;
-  %1 = alloca { i32, i32 }, align 4
-  %2 = getelementptr inbounds { i32, i32 }, ptr %1, i32 0, i32 0
+  %2 = getelementptr inbounds { i32, i32 }, ptr %ptr, i32 0, i32 0
   %3 = load i32, ptr %2, align 4
-  %4 = getelementptr inbounds { i32, i32 }, ptr %1, i32 0, i32 1
+  %4 = getelementptr inbounds { i32, i32 }, ptr %ptr, i32 0, i32 1
   %5 = load i32, ptr %4, align 4
   %6 = call i1 @fn5(i32 %3, i32 %5)
   ret i1 %6

diff --git a/llvm/test/Transforms/InstCombine/dead-alloc-elim.ll b/llvm/test/Transforms/InstCombine/dead-alloc-elim.ll
new file mode 100644
index 0000000000000..b135f76f709f1
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/dead-alloc-elim.ll
@@ -0,0 +1,140 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+declare noalias ptr @calloc(i32, i32) nounwind allockind("alloc,zeroed") allocsize(0,1) "alloc-family"="malloc"
+declare void @free(ptr) allockind("free") "alloc-family"="malloc"
+
+; Test load from uninitialized alloca - should be removed and replaced with undef
+define i32 @test_load_uninitialized_alloca() {
+; CHECK-LABEL: @test_load_uninitialized_alloca(
+; CHECK-NEXT:    ret i32 undef
+;
+  %a = alloca i32
+  %v = load i32, ptr %a
+  ret i32 %v
+}
+
+; Test load from zero-initialized malloc - should be removed and replaced with zero
+define i32 @test_load_zero_initialized_malloc() {
+; CHECK-LABEL: @test_load_zero_initialized_malloc(
+; CHECK-NEXT:    ret i32 0
+;
+  %a = call ptr @calloc(i32 1, i32 4)
+  %v = load i32, ptr %a
+  call void @free(ptr %a)
+  ret i32 %v
+}
+
+; Test memcpy from uninitialized source - should be removed
+define void @test_memcpy_from_uninitialized_alloca(ptr %dest) {
+; CHECK-LABEL: @test_memcpy_from_uninitialized_alloca(
+; CHECK-NEXT:    ret void
+;
+  %src = alloca i32, align 1
+  call void @llvm.memcpy.p0.p0.i32(ptr %src, ptr %src, i32 4, i1 false)
+  call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 4, i1 false)
+  ret void
+}
+
+; Test memcpy from zeroed source - should transform to memset with zero
+define void @test_memcpy_from_uninitialized_calloc(ptr %dest) {
+; CHECK-LABEL: @test_memcpy_from_uninitialized_calloc(
+; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr noundef nonnull align 1 dereferenceable(16) [[DEST:%.*]], i8 0, i32 16, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %src = call ptr @calloc(i32 1, i32 16)
+  call void @llvm.memcpy.p0.p0.i32(ptr %dest, ptr %src, i32 16, i1 false)
+  call void @free(ptr %src)
+  ret void
+}
+
+; Test mixed read/write pattern - should not be removable due to write before read
+define i32 @test_write_then_read_alloca() {
+; CHECK-LABEL: @test_write_then_read_alloca(
+; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    store i8 42, ptr [[A]], align 1
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT:    ret i32 [[V]]
+;
+  %a = alloca i32
+  store i8 42, ptr %a
+  %v = load i32, ptr %a
+  ret i32 %v
+}
+
+; Test read then write pattern - should not be removable due to conflicting access
+define void @test_read_then_write_alloca() {
+; CHECK-LABEL: @test_read_then_write_alloca(
+; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[V:%.*]] = load i32, ptr [[A]], align 4
+; CHECK-NEXT:    [[V8:%.*]] = trunc i32 [[V]] to i8
+; CHECK-NEXT:    store i8 [[V8]], ptr [[A]], align 1
+; CHECK-NEXT:    ret void
+;
+  %a = alloca i32
+  %v = load i32, ptr %a
+  %v8 = trunc i32 %v to i8
+  store i8 %v8, ptr %a
+  ret void
+}
+
+; Test load through GEP from uninitialized alloca
+define i8 @test_load_gep_uninitialized_alloca() {
+; CHECK-LABEL: @test_load_gep_uninitialized_alloca(
+; CHECK-NEXT:    ret i8 undef
+;
+  %a = alloca [4 x i8]
+  %gep = getelementptr [4 x i8], ptr %a, i32 0, i32 2
+  %v = load i8, ptr %gep
+  ret i8 %v
+}
+
+; Test load through bitcast from uninitialized alloca
+define i16 @test_load_bitcast_uninitialized_alloca() {
+; CHECK-LABEL: @test_load_bitcast_uninitialized_alloca(
+; CHECK-NEXT:    ret i16 undef
+;
+  %a = alloca i32
+  %bc = bitcast ptr %a to ptr
+  %v = load i16, ptr %bc
+  ret i16 %v
+}
+
+; Test memmove from zero-initialized malloc
+define void @test_memmove_from_zero_initialized_malloc(ptr %dest) {
+; CHECK-LABEL: @test_memmove_from_zero_initialized_malloc(
+; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr noundef nonnull align 1 dereferenceable(32) [[DEST:%.*]], i8 0, i32 32, i1 false)
+; CHECK-NEXT:    ret void
+;
+  %src = call ptr @calloc(i32 8, i32 4)
+  call void @llvm.memmove.p0.p0.i32(ptr %dest, ptr %src, i32 32, i1 false)
+  call void @free(ptr %src)
+  ret void
+}
+
+; Test multiple loads from same uninitialized alloca
+define { i32, i32 } @test_multiple_loads_uninitialized_alloca() {
+; CHECK-LABEL: @test_multiple_loads_uninitialized_alloca(
+; CHECK-NEXT:    ret { i32, i32 } undef
+;
+  %a = alloca [2 x i32]
+  %gep1 = getelementptr [2 x i32], ptr %a, i32 0, i32 0
+  %gep2 = getelementptr [2 x i32], ptr %a, i32 0, i32 1
+  %v1 = load i32, ptr %gep1
+  %v2 = load i32, ptr %gep2
+  %ret = insertvalue { i32, i32 } { i32 undef, i32 poison }, i32 %v1, 0
+  %ret2 = insertvalue { i32, i32 } %ret, i32 %v2, 1
+  ret { i32, i32 } %ret2
+}
+
+; Test that volatile operations prevent removal
+define i32 @test_volatile_load_prevents_removal() {
+; CHECK-LABEL: @test_volatile_load_prevents_removal(
+; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 4
+; CHECK-NEXT:    [[V:%.*]] = load volatile i32, ptr [[A]], align 4
+; CHECK-NEXT:    ret i32 [[V]]
+;
+  %a = alloca i32
+  %v = load volatile i32, ptr %a
+  ret i32 %v
+}

diff --git a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
index 15eb3e15ea44a..af18a427ee372 100644
--- a/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/fp-ret-bitcast.ll
@@ -12,11 +12,11 @@ target datalayout = "E-p:64:64:64-a0:0:8-f32:32:32-f64:64:64-i1:8:8-i8:8:8-i16:1
 @"\01L_OBJC_METH_VAR_NAME_112" = internal global [15 x i8] c"whiteComponent\00", section "__TEXT,__cstring,cstring_literals"
 @"\01L_OBJC_SELECTOR_REFERENCES_81" = internal global ptr @"\01L_OBJC_METH_VAR_NAME_112", section "__OBJC,__message_refs,literal_pointers,no_dead_strip"
 
-define void @bork() nounwind  {
+define void @bork(ptr %color, ptr %color.466) nounwind  {
 ; CHECK-LABEL: @bork(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[COLOR:%.*]] = alloca ptr, align 8
-; CHECK-NEXT:    [[TMP103:%.*]] = load ptr, ptr [[COLOR]], align 4
+; CHECK-NEXT:    [[TMP103:%.*]] = load ptr, ptr [[COLOR:%.*]], align 4
+; CHECK-NEXT:    store ptr [[TMP103]], ptr [[COLOR_466:%.*]], align 4
 ; CHECK-NEXT:    [[TMP105:%.*]] = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4
 ; CHECK-NEXT:    [[TMP107:%.*]] = call float @objc_msgSend_fpret(ptr [[TMP103]], ptr [[TMP105]]) #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    br label [[EXIT:%.*]]
@@ -24,8 +24,6 @@ define void @bork() nounwind  {
 ; CHECK-NEXT:    ret void
 ;
 entry:
-  %color = alloca ptr
-  %color.466 = alloca ptr
   %tmp103 = load ptr, ptr %color, align 4
   store ptr %tmp103, ptr %color.466, align 4
   %tmp105 = load ptr, ptr @"\01L_OBJC_SELECTOR_REFERENCES_81", align 4

diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index e78d70058c146..7568c6edc429c 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -580,13 +580,11 @@ define i32 @test20_as1(ptr addrspace(1) %P, i32 %A, i32 %B) {
 }
 
 
-define i32 @test21() {
+define i32 @test21(ptr %pbob1) {
 ; CHECK-LABEL: @test21(
-; CHECK-NEXT:    [[PBOB1:%.*]] = alloca [[INTSTRUCT:%.*]], align 8
-; CHECK-NEXT:    [[RVAL:%.*]] = load i32, ptr [[PBOB1]], align 4
+; CHECK-NEXT:    [[RVAL:%.*]] = load i32, ptr [[PBOB1:%.*]], align 4
 ; CHECK-NEXT:    ret i32 [[RVAL]]
 ;
-  %pbob1 = alloca %intstruct
   %pbob2 = getelementptr %intstruct, ptr %pbob1
   %rval = load i32, ptr %pbob2
   ret i32 %rval
@@ -654,18 +652,16 @@ define i1 @test26(ptr %arr) {
   %struct.siginfo_t = type { i32, i32, i32, { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] } }
   %struct.sigval_t = type { ptr }
 
-define i32 @test27(ptr %to, ptr %from) {
+define i32 @test27(ptr %to, ptr %from, ptr %from_addr) {
 ; CHECK-LABEL: @test27(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[FROM_ADDR:%.*]] = alloca ptr, align 8
-; CHECK-NEXT:    [[T344:%.*]] = load ptr, ptr [[FROM_ADDR]], align 8
+; CHECK-NEXT:    [[T344:%.*]] = load ptr, ptr [[FROM_ADDR:%.*]], align 8
 ; CHECK-NEXT:    [[T348:%.*]] = getelementptr i8, ptr [[T344]], i64 24
 ; CHECK-NEXT:    [[T351:%.*]] = load i32, ptr [[T348]], align 8
 ; CHECK-NEXT:    [[T360:%.*]] = call i32 asm sideeffect "...", "=r,ir,*m,i,0,~{dirflag},~{fpsr},~{flags}"(i32 [[T351]], ptr elementtype([[STRUCT___LARGE_STRUCT:%.*]]) null, i32 -14, i32 0) #[[ATTR0:[0-9]+]]
 ; CHECK-NEXT:    unreachable
 ;
 entry:
-  %from_addr = alloca ptr
   %t344 = load ptr, ptr %from_addr, align 8
   %t345 = getelementptr %struct.siginfo_t, ptr %t344, i32 0, i32 3
   %t346 = getelementptr { { i32, i32, [0 x i8], %struct.sigval_t, i32 }, [88 x i8] }, ptr %t345, i32 0, i32 0
@@ -1345,10 +1341,7 @@ declare noalias ptr @malloc(i64) nounwind allockind("alloc,uninitialized") alloc
 define i32 @test_gep_bitcast_malloc(ptr %a) {
 ; CHECK-LABEL: @test_gep_bitcast_malloc(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[CALL:%.*]] = call noalias dereferenceable_or_null(16) ptr @malloc(i64 16)
-; CHECK-NEXT:    [[G3:%.*]] = getelementptr i8, ptr [[CALL]], i64 12
-; CHECK-NEXT:    [[A_C:%.*]] = load i32, ptr [[G3]], align 4
-; CHECK-NEXT:    ret i32 [[A_C]]
+; CHECK-NEXT:    ret i32 undef
 ;
 entry:
   %call = call noalias ptr @malloc(i64 16) #2

diff --git a/llvm/test/Transforms/InstCombine/malloc-free.ll b/llvm/test/Transforms/InstCombine/malloc-free.ll
index 1f556821270a2..989074f97aaf6 100644
--- a/llvm/test/Transforms/InstCombine/malloc-free.ll
+++ b/llvm/test/Transforms/InstCombine/malloc-free.ll
@@ -133,17 +133,13 @@ define void @test4() {
 
 define void @test5(ptr %ptr, ptr %esc) {
 ; CHECK-LABEL: @test5(
-; CHECK-NEXT:    [[A:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
-; CHECK-NEXT:    [[B:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
 ; CHECK-NEXT:    [[C:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
 ; CHECK-NEXT:    [[D:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
 ; CHECK-NEXT:    [[E:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
 ; CHECK-NEXT:    [[F:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
 ; CHECK-NEXT:    [[G:%.*]] = call dereferenceable_or_null(700) ptr @malloc(i32 700)
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(32) [[PTR:%.*]], ptr noundef nonnull align 1 dereferenceable(32) [[A]], i32 32, i1 false)
-; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i32(ptr noundef nonnull align 1 dereferenceable(32) [[PTR]], ptr noundef nonnull align 1 dereferenceable(32) [[B]], i32 32, i1 false)
 ; CHECK-NEXT:    store ptr [[C]], ptr [[ESC:%.*]], align 4
-; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr [[D]], ptr [[PTR]], i32 32, i1 true)
+; CHECK-NEXT:    call void @llvm.memcpy.p0.p0.i32(ptr [[D]], ptr [[PTR:%.*]], i32 32, i1 true)
 ; CHECK-NEXT:    call void @llvm.memmove.p0.p0.i32(ptr [[E]], ptr [[PTR]], i32 32, i1 true)
 ; CHECK-NEXT:    call void @llvm.memset.p0.i32(ptr [[F]], i8 5, i32 32, i1 true)
 ; CHECK-NEXT:    store volatile i8 4, ptr [[G]], align 1

diff --git a/llvm/test/Transforms/InstCombine/multiple-uses-load-bitcast-select.ll b/llvm/test/Transforms/InstCombine/multiple-uses-load-bitcast-select.ll
index 38fca0314ae11..5d3512e10f418 100644
--- a/llvm/test/Transforms/InstCombine/multiple-uses-load-bitcast-select.ll
+++ b/llvm/test/Transforms/InstCombine/multiple-uses-load-bitcast-select.ll
@@ -1,20 +1,16 @@
 ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
 ; RUN: opt < %s -passes=instcombine -S -data-layout="E-m:e-i1:8:16-i8:8:16-i64:64-f128:64-v128:64-a:8:16-n32:64" | FileCheck %s
 
-define void @PR35618(ptr %st1, ptr %st2) {
+define void @PR35618(ptr %st1, ptr %st2, ptr %y1, ptr %z1) {
 ; CHECK-LABEL: @PR35618(
-; CHECK-NEXT:    [[Y1:%.*]] = alloca double, align 8
-; CHECK-NEXT:    [[Z1:%.*]] = alloca double, align 8
-; CHECK-NEXT:    [[LD1:%.*]] = load double, ptr [[Y1]], align 8
-; CHECK-NEXT:    [[LD2:%.*]] = load double, ptr [[Z1]], align 8
+; CHECK-NEXT:    [[LD1:%.*]] = load double, ptr [[Y1:%.*]], align 8
+; CHECK-NEXT:    [[LD2:%.*]] = load double, ptr [[Z1:%.*]], align 8
 ; CHECK-NEXT:    [[TMP:%.*]] = fcmp olt double [[LD1]], [[LD2]]
 ; CHECK-NEXT:    [[TMP12_V:%.*]] = select i1 [[TMP]], double [[LD1]], double [[LD2]]
 ; CHECK-NEXT:    store double [[TMP12_V]], ptr [[ST1:%.*]], align 8
 ; CHECK-NEXT:    store double [[TMP12_V]], ptr [[ST2:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %y1 = alloca double
-  %z1 = alloca double
   %ld1 = load double, ptr %y1
   %ld2 = load double, ptr %z1
   %tmp = fcmp olt double %ld1, %ld2
@@ -25,20 +21,16 @@ define void @PR35618(ptr %st1, ptr %st2) {
   ret void
 }
 
-define void @PR35618_asan(ptr %st1, ptr %st2) sanitize_address {
+define void @PR35618_asan(ptr %st1, ptr %st2, ptr %y1, ptr %z1) sanitize_address {
 ; CHECK-LABEL: @PR35618_asan(
-; CHECK-NEXT:    [[Y1:%.*]] = alloca double, align 8
-; CHECK-NEXT:    [[Z1:%.*]] = alloca double, align 8
-; CHECK-NEXT:    [[LD1:%.*]] = load double, ptr [[Y1]], align 8
-; CHECK-NEXT:    [[LD2:%.*]] = load double, ptr [[Z1]], align 8
+; CHECK-NEXT:    [[LD1:%.*]] = load double, ptr [[Y1:%.*]], align 8
+; CHECK-NEXT:    [[LD2:%.*]] = load double, ptr [[Z1:%.*]], align 8
 ; CHECK-NEXT:    [[TMP:%.*]] = fcmp olt double [[LD1]], [[LD2]]
 ; CHECK-NEXT:    [[TMP12_V:%.*]] = select i1 [[TMP]], double [[LD1]], double [[LD2]]
 ; CHECK-NEXT:    store double [[TMP12_V]], ptr [[ST1:%.*]], align 8
 ; CHECK-NEXT:    store double [[TMP12_V]], ptr [[ST2:%.*]], align 8
 ; CHECK-NEXT:    ret void
 ;
-  %y1 = alloca double
-  %z1 = alloca double
   %ld1 = load double, ptr %y1
   %ld2 = load double, ptr %z1
   %tmp = fcmp olt double %ld1, %ld2

diff --git a/llvm/test/Transforms/InstCombine/objsize.ll b/llvm/test/Transforms/InstCombine/objsize.ll
index 1c33412303c19..39f6f493782d2 100644
--- a/llvm/test/Transforms/InstCombine/objsize.ll
+++ b/llvm/test/Transforms/InstCombine/objsize.ll
@@ -14,19 +14,17 @@ define i32 @foo() nounwind {
   ret i32 %1
 }
 
-define ptr @bar() nounwind {
+define ptr @bar(ptr %retval) nounwind {
 ; CHECK-LABEL: @bar(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[RETVAL:%.*]] = alloca ptr, align 4
 ; CHECK-NEXT:    br i1 true, label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
 ; CHECK:       cond.true:
-; CHECK-NEXT:    [[TMP0:%.*]] = load ptr, ptr [[RETVAL]], align 4
-; CHECK-NEXT:    ret ptr [[TMP0]]
+; CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[RETVAL:%.*]], align 4
+; CHECK-NEXT:    ret ptr [[TMP1]]
 ; CHECK:       cond.false:
 ; CHECK-NEXT:    ret ptr poison
 ;
 entry:
-  %retval = alloca ptr
   %0 = call i32 @llvm.objectsize.i32.p0(ptr @a, i1 false, i1 false, i1 false)
   %cmp = icmp ne i32 %0, -1
   br i1 %cmp, label %cond.true, label %cond.false

diff --git a/llvm/test/Transforms/InstCombine/select-load.ll b/llvm/test/Transforms/InstCombine/select-load.ll
index 36883423aea36..308dc25bf780c 100644
--- a/llvm/test/Transforms/InstCombine/select-load.ll
+++ b/llvm/test/Transforms/InstCombine/select-load.ll
@@ -4,19 +4,14 @@
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-grtev4-linux-gnu"
 
-define i32 @test_plain(i1 %f) {
+define i32 @test_plain(i1 %f, ptr %a, ptr %b) {
 ; CHECK-LABEL: @test_plain(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 8
-; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 8
-; CHECK-NEXT:    [[A_VAL:%.*]] = load i32, ptr [[A]], align 8
+; CHECK-NEXT:    [[B:%.*]] = select i1 [[F:%.*]], ptr [[A:%.*]], ptr [[B1:%.*]]
 ; CHECK-NEXT:    [[B_VAL:%.*]] = load i32, ptr [[B]], align 8
-; CHECK-NEXT:    [[L:%.*]] = select i1 [[F:%.*]], i32 [[A_VAL]], i32 [[B_VAL]]
-; CHECK-NEXT:    ret i32 [[L]]
+; CHECK-NEXT:    ret i32 [[B_VAL]]
 ;
 entry:
-  %a = alloca i32, align 8
-  %b = alloca i32, align 8
   %sel = select i1 %f, ptr %a, ptr %b
   %l = load i32, ptr %sel, align 8
   ret i32 %l
@@ -82,19 +77,14 @@ entry:
 
 ; Msan just propagates shadow, even if speculated load accesses uninitialized
 ; value, instrumentation will select shadow of the desired value anyway.
-define i32 @test_msan(i1 %f) sanitize_memory {
+define i32 @test_msan(i1 %f, ptr %a, ptr %b) sanitize_memory {
 ; CHECK-LABEL: @test_msan(
 ; CHECK-NEXT:  entry:
-; CHECK-NEXT:    [[A:%.*]] = alloca i32, align 8
-; CHECK-NEXT:    [[B:%.*]] = alloca i32, align 8
-; CHECK-NEXT:    [[A_VAL:%.*]] = load i32, ptr [[A]], align 8
+; CHECK-NEXT:    [[B:%.*]] = select i1 [[F:%.*]], ptr [[A:%.*]], ptr [[B1:%.*]]
 ; CHECK-NEXT:    [[B_VAL:%.*]] = load i32, ptr [[B]], align 8
-; CHECK-NEXT:    [[L:%.*]] = select i1 [[F:%.*]], i32 [[A_VAL]], i32 [[B_VAL]]
-; CHECK-NEXT:    ret i32 [[L]]
+; CHECK-NEXT:    ret i32 [[B_VAL]]
 ;
 entry:
-  %a = alloca i32, align 8
-  %b = alloca i32, align 8
   %sel = select i1 %f, ptr %a, ptr %b
   %l = load i32, ptr %sel, align 8
   ret i32 %l

diff --git a/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll b/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll
index b4c606f037d56..abae2c6b9ab8e 100644
--- a/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll
+++ b/llvm/test/Transforms/InstCombine/shift-amount-reassociation.ll
@@ -156,11 +156,10 @@ define i32 @t11_shl_nsw_flag_preservation(i32 %x, i32 %y) {
 
 ; Reduced from https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=15587
 @X = external global i32
-define i64 @constantexpr() {
-; CHECK-LABEL: @constantexpr(
+define i64 @constantexpr(ptr %A) {
+; CHECK-LABEL: @constantexpr(ptr %A) {
 ; CHECK-NEXT:    ret i64 0
 ;
-  %A = alloca i64
   %L = load i64, ptr %A
   %V = add i64 ptrtoint (ptr @X to i64), 0
   %B2 = shl i64 %V, 0

diff --git a/llvm/test/Transforms/InstCombine/vscale_gep.ll b/llvm/test/Transforms/InstCombine/vscale_gep.ll
index 84019e613d233..5d39ad4e01afe 100644
--- a/llvm/test/Transforms/InstCombine/vscale_gep.ll
+++ b/llvm/test/Transforms/InstCombine/vscale_gep.ll
@@ -41,31 +41,27 @@ define void @gep_bitcast(ptr %p) {
 
 ; These tests serve to verify code changes when underlying gep ptr is alloca.
 ; This test is to verify 'inbounds' is added when it's valid to accumulate constant offset.
-define i32 @gep_alloca_inbounds_vscale_zero() {
+define i32 @gep_alloca_inbounds_vscale_zero(ptr %a) {
 ; CHECK-LABEL: @gep_alloca_inbounds_vscale_zero(
-; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
-; CHECK-NEXT:    [[TMP:%.*]] = getelementptr inbounds nuw i8, ptr [[A]], i64 8
+; CHECK-NEXT:    [[TMP:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 8
 ; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 4
 ; CHECK-NEXT:    ret i32 [[LOAD]]
 ;
-  %a = alloca <vscale x 4 x i32>
   %tmp = getelementptr <vscale x 4 x i32>, ptr %a, i32 0, i32 2
   %load = load i32, ptr %tmp
   ret i32 %load
 }
 
 ; This test is to verify 'inbounds' is not added when a constant offset can not be determined at compile-time.
-define i32 @gep_alloca_inbounds_vscale_nonzero() {
+define i32 @gep_alloca_inbounds_vscale_nonzero(ptr %a) {
 ; CHECK-LABEL: @gep_alloca_inbounds_vscale_nonzero(
-; CHECK-NEXT:    [[A:%.*]] = alloca <vscale x 4 x i32>, align 16
 ; CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.vscale.i64()
 ; CHECK-NEXT:    [[TMP2:%.*]] = shl nuw i64 [[TMP1]], 4
-; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[A]], i64 [[TMP2]]
+; CHECK-NEXT:    [[TMP3:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP2]]
 ; CHECK-NEXT:    [[TMP:%.*]] = getelementptr i8, ptr [[TMP3]], i64 8
 ; CHECK-NEXT:    [[LOAD:%.*]] = load i32, ptr [[TMP]], align 4
 ; CHECK-NEXT:    ret i32 [[LOAD]]
 ;
-  %a = alloca <vscale x 4 x i32>
   %tmp = getelementptr <vscale x 4 x i32>, ptr %a, i32 1, i32 2
   %load = load i32, ptr %tmp
   ret i32 %load


        


More information about the llvm-commits mailing list