[llvm] b19194c - [InstCombine] handle subobjects of constant aggregates

Martin Sebor via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 21 10:55:41 PDT 2022


Author: Martin Sebor
Date: 2022-06-21T11:55:14-06:00
New Revision: b19194c032e7640be0a482f20491341a62e7304f

URL: https://github.com/llvm/llvm-project/commit/b19194c032e7640be0a482f20491341a62e7304f
DIFF: https://github.com/llvm/llvm-project/commit/b19194c032e7640be0a482f20491341a62e7304f.diff

LOG: [InstCombine] handle subobjects of constant aggregates

Remove the known limitation of the library function call folders to only
work with top-level arrays of characters (as per the TODO comment in
the code) and allow them to also fold calls involving subobjects of
constant aggregates such as member arrays.
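
As an illustration (a hypothetical example, not taken from the commit or
its tests), a call of the kind that can now be folded: the strlen
argument below is a member array of a constant aggregate rather than a
top-level constant array, which the folders previously gave up on.

  #include <string.h>

  struct S { char a[4]; char b[8]; };
  static const struct S s = { "abc", "defghi" };

  size_t len(void) {
    /* The argument is the second member array of the constant aggregate
       s; with this change the IR-level strlen call can be folded to the
       constant 6. */
    return strlen(s.b);
  }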

Added: 
    llvm/test/Transforms/InstCombine/memchr-8.ll
    llvm/test/Transforms/InstCombine/wcslen-6.ll

Modified: 
    llvm/include/llvm/Analysis/ConstantFolding.h
    llvm/lib/Analysis/ConstantFolding.cpp
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
    llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
    llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll
    llvm/test/CodeGen/BPF/remove_truncate_5.ll
    llvm/test/CodeGen/BPF/rodata_2.ll
    llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
    llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
    llvm/test/DebugInfo/COFF/types-array.ll
    llvm/test/Transforms/InstCombine/memchr-5.ll
    llvm/test/Transforms/InstCombine/memcmp-3.ll
    llvm/test/Transforms/InstCombine/memcmp-4.ll
    llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll
    llvm/test/Transforms/InstCombine/memrchr-5.ll
    llvm/test/Transforms/InstCombine/sprintf-2.ll
    llvm/test/Transforms/InstCombine/str-int-3.ll
    llvm/test/Transforms/InstCombine/strcmp-3.ll
    llvm/test/Transforms/InstCombine/strlen-5.ll
    llvm/test/Transforms/InstCombine/strlen-6.ll
    llvm/test/Transforms/InstCombine/strlen-7.ll
    llvm/test/Transforms/InstCombine/strlen-8.ll
    llvm/test/Transforms/InstCombine/strncmp-4.ll
    llvm/test/Transforms/InstCombine/wcslen-3.ll

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/Analysis/ConstantFolding.h b/llvm/include/llvm/Analysis/ConstantFolding.h
index 4f62e89c4b44e..4a2bd2945b5a2 100644
--- a/llvm/include/llvm/Analysis/ConstantFolding.h
+++ b/llvm/include/llvm/Analysis/ConstantFolding.h
@@ -19,6 +19,8 @@
 #ifndef LLVM_ANALYSIS_CONSTANTFOLDING_H
 #define LLVM_ANALYSIS_CONSTANTFOLDING_H
 
+#include <stdint.h>
+
 namespace llvm {
 class APInt;
 template <typename T> class ArrayRef;
@@ -28,6 +30,7 @@ class DSOLocalEquivalent;
 class DataLayout;
 class Function;
 class GlobalValue;
+class GlobalVariable;
 class Instruction;
 class TargetLibraryInfo;
 class Type;
@@ -179,6 +182,8 @@ Constant *ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
 /// Check whether the given call has no side-effects.
 /// Specifically checks for math routines which sometimes set errno.
 bool isMathLibCallNoop(const CallBase *Call, const TargetLibraryInfo *TLI);
+
+Constant *ReadByteArrayFromGlobal(const GlobalVariable *GV, uint64_t Offset);
 }
 
 #endif

diff  --git a/llvm/lib/Analysis/ConstantFolding.cpp b/llvm/lib/Analysis/ConstantFolding.cpp
index 8a4da8370563c..0096aa99ff6be 100644
--- a/llvm/lib/Analysis/ConstantFolding.cpp
+++ b/llvm/lib/Analysis/ConstantFolding.cpp
@@ -633,6 +633,39 @@ Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
   return ConstantInt::get(IntType->getContext(), ResultVal);
 }
 
+} // anonymous namespace
+
+// If GV is a constant with an initializer, read its representation starting
+// at Offset and return it as a constant array of unsigned char.  Otherwise
+// return null.
+Constant *llvm::ReadByteArrayFromGlobal(const GlobalVariable *GV,
+                                        uint64_t Offset) {
+  if (!GV->isConstant() || !GV->hasDefinitiveInitializer())
+    return nullptr;
+
+  const DataLayout &DL = GV->getParent()->getDataLayout();
+  Constant *Init = const_cast<Constant *>(GV->getInitializer());
+  TypeSize InitSize = DL.getTypeAllocSize(Init->getType());
+  if (InitSize < Offset)
+    return nullptr;
+
+  uint64_t NBytes = InitSize - Offset;
+  if (NBytes > UINT16_MAX)
+    // Bail for large initializers in excess of 64K to avoid allocating
+    // too much memory.
+    // Offset is assumed to be less than or equal to InitSize (this
+    // is enforced in ReadDataFromGlobal).
+    return nullptr;
+
+  SmallVector<unsigned char, 256> RawBytes(static_cast<size_t>(NBytes));
+  unsigned char *CurPtr = RawBytes.data();
+
+  if (!ReadDataFromGlobal(Init, Offset, CurPtr, NBytes, DL))
+    return nullptr;
+
+  return ConstantDataArray::get(GV->getContext(), RawBytes);
+}
+
 /// If this Offset points exactly to the start of an aggregate element, return
 /// that element, otherwise return nullptr.
 Constant *getConstantAtOffset(Constant *Base, APInt Offset,
@@ -661,8 +694,6 @@ Constant *getConstantAtOffset(Constant *Base, APInt Offset,
   return C;
 }
 
-} // end anonymous namespace
-
 Constant *llvm::ConstantFoldLoadFromConst(Constant *C, Type *Ty,
                                           const APInt &Offset,
                                           const DataLayout &DL) {
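
As a rough sketch of how the new ReadByteArrayFromGlobal helper might be
used (illustrative only, not code from this commit; within the commit its
caller is getConstantDataArrayInfo in ValueTracking.cpp):

  #include "llvm/Analysis/ConstantFolding.h"
  #include "llvm/IR/Constants.h"
  #include "llvm/IR/GlobalVariable.h"
  #include "llvm/Support/Format.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  // Dump the bytes of GV's initializer starting at byte offset Off.
  static void dumpInitializerBytes(const GlobalVariable *GV, uint64_t Off) {
    Constant *C = ReadByteArrayFromGlobal(GV, Off);
    if (!C)
      return; // no definitive constant initializer, or over the 64K limit
    // On success the result is an [N x i8] constant covering the bytes
    // from Off to the end of the initializer.  An all-zero range comes
    // back as a zeroinitializer rather than a ConstantDataArray, which
    // is why callers (and this sketch) use dyn_cast.
    if (auto *CDA = dyn_cast<ConstantDataArray>(C))
      for (unsigned I = 0, E = CDA->getNumElements(); I != E; ++I)
        errs() << format("%02x ", unsigned(CDA->getElementAsInteger(I)));
    errs() << "\n";
  }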

diff  --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 3fa6365b2d6de..17bebe1905357 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -26,6 +26,7 @@
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/AssumeBundleQueries.h"
 #include "llvm/Analysis/AssumptionCache.h"
+#include "llvm/Analysis/ConstantFolding.h"
 #include "llvm/Analysis/EHPersonalities.h"
 #include "llvm/Analysis/GuardUtils.h"
 #include "llvm/Analysis/InstructionSimplify.h"
@@ -4174,6 +4175,10 @@ bool llvm::isGEPBasedOnPointerToString(const GEPOperator *GEP,
   return true;
 }
 
+// If V refers to an initialized global constant, set Slice either to
+// its initializer if the size of its elements equals ElementSize, or,
+// for ElementSize == 8, to its representation as an array of unsigned
+// char. Return true on success.
 bool llvm::getConstantDataArrayInfo(const Value *V,
                                     ConstantDataArraySlice &Slice,
                                     unsigned ElementSize, uint64_t Offset) {
@@ -4185,21 +4190,29 @@ bool llvm::getConstantDataArrayInfo(const Value *V,
   // If the value is a GEP instruction or constant expression, treat it as an
   // offset.
   if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
-    // The GEP operator should be based on a pointer to string constant, and is
-    // indexing into the string constant.
-    if (!isGEPBasedOnPointerToString(GEP, ElementSize))
+    // Fail if the first GEP operand is not a constant zero and we're
+    // not indexing into the initializer.
+    const ConstantInt *FirstIdx = dyn_cast<ConstantInt>(GEP->getOperand(1));
+    if (!FirstIdx || !FirstIdx->isZero())
       return false;
 
-    // If the second index isn't a ConstantInt, then this is a variable index
-    // into the array.  If this occurs, we can't say anything meaningful about
-    // the string.
-    uint64_t StartIdx = 0;
-    if (const ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(2)))
-      StartIdx = CI->getZExtValue();
-    else
+    Value *Op0 = GEP->getOperand(0);
+    const GlobalVariable *GV = dyn_cast<GlobalVariable>(Op0);
+    if (!GV)
+      return false;
+
+    // Fail if the offset into the initializer is not constant.
+    const DataLayout &DL = GV->getParent()->getDataLayout();
+    APInt Off(DL.getIndexSizeInBits(GEP->getPointerAddressSpace()), 0);
+    if (!GEP->accumulateConstantOffset(DL, Off))
       return false;
-    return getConstantDataArrayInfo(GEP->getOperand(0), Slice, ElementSize,
-                                    StartIdx + Offset);
+
+    // Fail if the constant offset is excessive.
+    uint64_t StartIdx = Off.getLimitedValue();
+    if (StartIdx == UINT64_MAX)
+      return false;
+
+    return getConstantDataArrayInfo(Op0, Slice, ElementSize, StartIdx + Offset);
   }
 
   // The GEP instruction, constant or instruction, must reference a global
@@ -4209,34 +4222,51 @@ bool llvm::getConstantDataArrayInfo(const Value *V,
   if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
     return false;
 
-  const ConstantDataArray *Array;
-  ArrayType *ArrayTy;
+  const DataLayout &DL = GV->getParent()->getDataLayout();
+  ConstantDataArray *Array = nullptr;
+  ArrayType *ArrayTy = nullptr;
+
   if (GV->getInitializer()->isNullValue()) {
     Type *GVTy = GV->getValueType();
-    if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
-      // A zeroinitializer for the array; there is no ConstantDataArray.
-      Array = nullptr;
-    } else {
-      const DataLayout &DL = GV->getParent()->getDataLayout();
-      uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
-      uint64_t Length = SizeInBytes / (ElementSize / 8);
-      if (Length <= Offset)
-        return false;
+    uint64_t SizeInBytes = DL.getTypeStoreSize(GVTy).getFixedSize();
+    uint64_t Length = SizeInBytes / (ElementSize / 8);
+    if (Length <= Offset)
+      // Bail on undersized constants to let sanitizers detect library
+      // calls with them as arguments.
+      return false;
 
-      Slice.Array = nullptr;
-      Slice.Offset = 0;
-      Slice.Length = Length - Offset;
-      return true;
+    Slice.Array = nullptr;
+    Slice.Offset = 0;
+    Slice.Length = Length - Offset;
+    return true;
+  }
+
+  auto *Init = const_cast<Constant *>(GV->getInitializer());
+  if (auto *ArrayInit = dyn_cast<ConstantDataArray>(Init)) {
+    Type *InitElTy = ArrayInit->getElementType();
+    if (InitElTy->isIntegerTy(ElementSize)) {
+      // If Init is an initializer for an array of the expected type
+      // and size, use it as is.
+      Array = ArrayInit;
+      ArrayTy = ArrayInit->getType();
     }
-  } else {
-    // This must be a ConstantDataArray.
-    Array = dyn_cast<ConstantDataArray>(GV->getInitializer());
-    if (!Array)
+  }
+
+  if (!Array) {
+    if (ElementSize != 8)
+      // TODO: Handle conversions to larger integral types.
+      return false;
+
+    // Otherwise extract the portion of the initializer starting
+    // at Offset as an array of bytes, and reset Offset.
+    Init = ReadByteArrayFromGlobal(GV, Offset);
+    if (!Init)
       return false;
-    ArrayTy = Array->getType();
+
+    Offset = 0;
+    Array = dyn_cast<ConstantDataArray>(Init);
+    ArrayTy = dyn_cast<ArrayType>(Init->getType());
   }
-  if (!ArrayTy->getElementType()->isIntegerTy(ElementSize))
-    return false;
 
   uint64_t NumElts = ArrayTy->getArrayNumElements();
   if (Offset > NumElts)

diff  --git a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
index c877121f0e29d..b10a4146ef91e 100644
--- a/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
+++ b/llvm/lib/Transforms/Utils/SimplifyLibCalls.cpp
@@ -690,6 +690,7 @@ Value *LibCallSimplifier::optimizeStringLength(CallInst *CI, IRBuilderBase &B,
   // very useful because calling strlen for a pointer of other types is
   // very uncommon.
   if (GEPOperator *GEP = dyn_cast<GEPOperator>(Src)) {
+    // TODO: Handle subobjects.
     if (!isGEPBasedOnPointerToString(GEP, CharSize))
       return nullptr;
 
@@ -1142,8 +1143,8 @@ Value *LibCallSimplifier::optimizeMemChr(CallInst *CI, IRBuilderBase &B) {
                           CI->getType());
 }
 
-// Optimize a memcmp call CI with constant arrays LHS and RHS and nonconstant
-// Size.
+// Optimize a memcmp call CI with constant arrays LHS and RHS and either
+// nonconstant Size or constant size known to be in bounds.
 static Value *optimizeMemCmpVarSize(CallInst *CI, Value *LHS, Value *RHS,
                                     Value *Size, IRBuilderBase &B,
                                     const DataLayout &DL) {
@@ -1174,9 +1175,15 @@ static Value *optimizeMemCmpVarSize(CallInst *CI, Value *LHS, Value *RHS,
     }
   }
 
+  if (auto *SizeC = dyn_cast<ConstantInt>(Size))
+    if (MinSize < SizeC->getZExtValue())
+      // Fail if the bound happens to be constant and excessive and
+      // let sanitizers catch it.
+      return nullptr;
+
   // One array is a leading part of the other of equal or greater size.
-  // Fold the result to zero.  Size is assumed to be in bounds, since
-  // otherwise the call would be undefined.
+  // Fold the result to zero.  Nonconstant size is assumed to be in bounds,
+  // since otherwise the call would be undefined.
   return Zero;
 }
 
@@ -1309,6 +1316,7 @@ Value *LibCallSimplifier::optimizeMemCCpy(CallInst *CI, IRBuilderBase &B) {
       return Constant::getNullValue(CI->getType());
     if (!getConstantStringInfo(Src, SrcStr, /*Offset=*/0,
                                /*TrimAtNul=*/false) ||
+        // TODO: Handle zeroinitializer.
         !StopChar)
       return nullptr;
   } else {

diff  --git a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
index 262944498c0c4..dc1dc56eedb99 100644
--- a/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-2012-05-07-MemcpyAlignBug.ll
@@ -1,13 +1,13 @@
 ; RUN: llc < %s -mtriple=arm64-eabi -mcpu=cyclone | FileCheck %s
 ; <rdar://problem/11294426>
 
- at b = private unnamed_addr constant [3 x i32] [i32 1768775988, i32 1685481784, i32 1836253201], align 4
+ at b = common unnamed_addr global [3 x i32] zeroinitializer, align 4
 
 ; The important thing for this test is that we need an unaligned load of `l_b'
 ; ("ldr w2, [x1, #8]" in this case).
 
-; CHECK:      adrp x[[PAGE:[0-9]+]], {{l_b at PAGE|.Lb}}
-; CHECK: add  x[[ADDR:[0-9]+]], x[[PAGE]], {{l_b at PAGEOFF|:lo12:.Lb}}
+; CHECK:      adrp x[[PAGE:[0-9]+]], :got:b
+; CHECK-NEXT: ldr  x[[PAGE]], [x[[ADDR:[0-9]+]], :got_lo12:b]
 ; CHECK-NEXT: ldr  [[VAL2:x[0-9]+]], [x[[ADDR]]]
 ; CHECK-NEXT: ldr  [[VAL:w[0-9]+]], [x[[ADDR]], #8]
 ; CHECK-NEXT: str  [[VAL]], [x0, #8]

diff  --git a/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll b/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll
index 6fa5288c643b8..981366a473f0f 100644
--- a/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll
+++ b/llvm/test/CodeGen/ARM/constantpool-promote-ldrh.ll
@@ -3,11 +3,11 @@
 target datalayout = "e-m:e-p:32:32-i64:64-v128:64:128-a:0:32-n32-S64"
 target triple = "thumbv6m-arm-linux-gnueabi"
 
- at fn1.a = private unnamed_addr constant [4 x i16] [i16 6, i16 0, i16 0, i16 0], align 2
+ at fn1.a = private unnamed_addr global [4 x i16] [i16 6, i16 0, i16 0, i16 0], align 2
 
 ; We must not try and emit this bad instruction: "ldrh r1, .LCPI0_0"
 ; CHECK-LABEL: fn1:
-; CHECK: adr [[base:r[0-9]+]], .LCPI0_0
+; CHECK: ldr [[base:r[0-9]+]], .LCPI0_0
 ; CHECK-NOT: ldrh {{r[0-9]+}}, .LCPI0_0
 ; CHECK: ldrh r{{[0-9]+}}, [[[base]]]
 define hidden i32 @fn1() #0 {

diff  --git a/llvm/test/CodeGen/BPF/remove_truncate_5.ll b/llvm/test/CodeGen/BPF/remove_truncate_5.ll
index 294f93dab9d1c..1e29a298eabb9 100644
--- a/llvm/test/CodeGen/BPF/remove_truncate_5.ll
+++ b/llvm/test/CodeGen/BPF/remove_truncate_5.ll
@@ -25,12 +25,11 @@ define dso_local void @test() local_unnamed_addr #0 {
   call void @llvm.lifetime.start.p0i8(i64 16, i8* nonnull %2) #3
   call void @llvm.memcpy.p0i8.p0i8.i64(i8* nonnull align 4 %2, i8* align 4 bitcast (%struct.test_t* @test.t to i8*), i64 16, i1 false)
 ; CHECK: r1 = 0
-; CHECK: r1 <<= 32
-; CHECK: r2 = r1
-; CHECK: r2 |= 0
-; CHECK: *(u64 *)(r10 - 8) = r2
-; CHECK: r1 |= 5
+; CHECK: *(u64 *)(r10 - 8) = r1
+; CHECK: r1 = 5
 ; CHECK: *(u64 *)(r10 - 16) = r1
+; CHECK: r1 = r10
+; CHECK: r1 += -16
   call void @foo(i8* nonnull %2) #3
 ; CHECK: call foo
   call void @llvm.lifetime.end.p0i8(i64 16, i8* nonnull %2) #3

diff  --git a/llvm/test/CodeGen/BPF/rodata_2.ll b/llvm/test/CodeGen/BPF/rodata_2.ll
index a7231f9635d38..4cbaf76f7248e 100644
--- a/llvm/test/CodeGen/BPF/rodata_2.ll
+++ b/llvm/test/CodeGen/BPF/rodata_2.ll
@@ -32,15 +32,18 @@ define i32 @test() local_unnamed_addr #0 {
 
 entry:
     tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 getelementptr inbounds (%struct.test_t2, %struct.test_t2* @g, i64 0, i32 0), i8* align 4 getelementptr inbounds (%struct.test_t2, %struct.test_t2* @test.t2, i64 0, i32 0), i64 32, i1 false)
-; CHECK:  r1 = g
-; CHECK:  r2 = 0
-; CHECK:  *(u32 *)(r1 + 28) = r2
-; CHECK:  r3 = 3
-; CHECK:  *(u32 *)(r1 + 24) = r3
-; CHECK:  r3 = 2
-; CHECK:  *(u32 *)(r1 + 20) = r3
-; CHECK:  r3 = 1
-; CHECK:  *(u32 *)(r1 + 16) = r3
+; CHECK: r1 = g ll
+; CHECK: r2 = 3
+; CHECK: *(u32 *)(r1 + 24) = r2
+; CHECK: r2 = 2
+; CHECK: *(u32 *)(r1 + 20) = r2
+; CHECK: r2 = 1
+; CHECK: *(u32 *)(r1 + 16) = r2
+; CHECK: r2 = 0
+; CHECK: *(u32 *)(r1 + 28) = r2
+; CHECK: *(u32 *)(r1 + 8) = r2
+; CHECK: *(u32 *)(r1 + 4) = r2
+; CHECK: *(u32 *)(r1 + 0) = r2
       ret i32 0
 }
 ; CHECK: .section  .rodata.cst32,"aM", at progbits,32

diff  --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
index 52031e368fcad..760062e292d50 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills-mir.ll
@@ -15,123 +15,124 @@ define double @caller() {
 
   ; MIR32-LABEL: name: caller
   ; MIR32: bb.0.entry:
-  ; MIR32:   renamable $r3 = LWZtoc @__const.caller.t, $r2 :: (load (s32) from got)
-  ; MIR32:   renamable $r4 = LI 31
-  ; MIR32:   renamable $v2 = LVX renamable $r3, killed renamable $r4
-  ; MIR32:   renamable $r4 = LI 16
-  ; MIR32:   renamable $v3 = LVX renamable $r3, killed renamable $r4
-  ; MIR32:   renamable $v4 = LVSL $zero, renamable $r3
-  ; MIR32:   renamable $v2 = VPERM renamable $v3, killed renamable $v2, renamable $v4
-  ; MIR32:   renamable $r4 = LI 172
-  ; MIR32:   STXVW4X killed renamable $v2, $r1, killed renamable $r4 :: (store (s128) into unknown-address + 16, align 4)
-  ; MIR32:   renamable $v2 = LVX $zero, killed renamable $r3
-  ; MIR32:   renamable $v2 = VPERM killed renamable $v2, killed renamable $v3, killed renamable $v4
-  ; MIR32:   renamable $r3 = LI 156
-  ; MIR32:   STXVW4X killed renamable $v2, $r1, killed renamable $r3 :: (store (s128), align 4)
-  ; MIR32:   ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
-  ; MIR32:   renamable $vsl0 = XXLXORz
-  ; MIR32:   $f1 = XXLXORdpz
-  ; MIR32:   $f2 = XXLXORdpz
-  ; MIR32:   $v2 = XXLXORz
-  ; MIR32:   $v3 = XXLXORz
-  ; MIR32:   $v4 = XXLXORz
-  ; MIR32:   $v5 = XXLXORz
-  ; MIR32:   $v6 = XXLXORz
-  ; MIR32:   $v7 = XXLXORz
-  ; MIR32:   $v8 = XXLXORz
-  ; MIR32:   $v9 = XXLXORz
-  ; MIR32:   $v10 = XXLXORz
-  ; MIR32:   $v11 = XXLXORz
-  ; MIR32:   $v12 = XXLXORz
-  ; MIR32:   $v13 = XXLXORz
-  ; MIR32:   $f3 = XXLXORdpz
-  ; MIR32:   $f4 = XXLXORdpz
-  ; MIR32:   $f5 = XXLXORdpz
-  ; MIR32:   $f6 = XXLXORdpz
-  ; MIR32:   $f7 = XXLXORdpz
-  ; MIR32:   renamable $r3 = LI 136
-  ; MIR32:   $f8 = XXLXORdpz
-  ; MIR32:   renamable $r4 = LI 120
-  ; MIR32:   renamable $r5 = LWZtoc %const.0, $r2 :: (load (s32) from got)
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   $f9 = XXLXORdpz
-  ; MIR32:   renamable $r3 = LI 104
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r4 :: (store (s128), align 8)
-  ; MIR32:   $f10 = XXLXORdpz
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   renamable $r3 = LI 88
-  ; MIR32:   $f11 = XXLXORdpz
-  ; MIR32:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   renamable $r3 = LI 72
-  ; MIR32:   renamable $v0 = LXVD2X $zero, killed renamable $r5 :: (load (s128) from constant-pool)
-  ; MIR32:   $f12 = XXLXORdpz
-  ; MIR32:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
-  ; MIR32:   $f13 = XXLXORdpz
-  ; MIR32:   renamable $r5 = LI 48
-  ; MIR32:   renamable $r6 = LI 512
-  ; MIR32:   $r3 = LI 128
-  ; MIR32:   $r4 = LI 256
-  ; MIR32:   STXVD2X killed renamable $v0, $r1, killed renamable $r5 :: (store (s128))
-  ; MIR32:   STW killed renamable $r6, 152, $r1 :: (store (s32))
-  ; MIR32:   BL_NOP <mcsymbol .callee[PR]>, csr_aix32_altivec, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def $f1
-  ; MIR32:   ADJCALLSTACKUP 188, 0, implicit-def dead $r1, implicit $r1
-  ; MIR32:   BLR implicit $lr, implicit $rm, implicit $f1
+  ; MIR32-NEXT:   renamable $r3 = LIS 16392
+  ; MIR32-NEXT:   renamable $r4 = LIS 16384
+  ; MIR32-NEXT:   STW killed renamable $r3, 180, $r1 :: (store (s32) into unknown-address + 24)
+  ; MIR32-NEXT:   renamable $r3 = LI 0
+  ; MIR32-NEXT:   STW killed renamable $r4, 172, $r1 :: (store (s32) into unknown-address + 16)
+  ; MIR32-NEXT:   renamable $r4 = LIS 16368
+  ; MIR32-NEXT:   STW renamable $r3, 184, $r1 :: (store (s32) into unknown-address + 28)
+  ; MIR32-NEXT:   STW renamable $r3, 176, $r1 :: (store (s32) into unknown-address + 20)
+  ; MIR32-NEXT:   STW renamable $r3, 168, $r1 :: (store (s32) into unknown-address + 12)
+  ; MIR32-NEXT:   STW killed renamable $r4, 164, $r1 :: (store (s32) into unknown-address + 8)
+  ; MIR32-NEXT:   STW renamable $r3, 160, $r1 :: (store (s32) into unknown-address + 4)
+  ; MIR32-NEXT:   STW killed renamable $r3, 156, $r1 :: (store (s32))
+  ; MIR32-NEXT:   ADJCALLSTACKDOWN 188, 0, implicit-def dead $r1, implicit $r1
+  ; MIR32-NEXT:   renamable $vsl0 = XXLXORz
+  ; MIR32-NEXT:   $f1 = XXLXORdpz
+  ; MIR32-NEXT:   $f2 = XXLXORdpz
+  ; MIR32-NEXT:   $v2 = XXLXORz
+  ; MIR32-NEXT:   $v3 = XXLXORz
+  ; MIR32-NEXT:   $v4 = XXLXORz
+  ; MIR32-NEXT:   $v5 = XXLXORz
+  ; MIR32-NEXT:   $v6 = XXLXORz
+  ; MIR32-NEXT:   $v7 = XXLXORz
+  ; MIR32-NEXT:   $v8 = XXLXORz
+  ; MIR32-NEXT:   $v9 = XXLXORz
+  ; MIR32-NEXT:   $v10 = XXLXORz
+  ; MIR32-NEXT:   $v11 = XXLXORz
+  ; MIR32-NEXT:   $v12 = XXLXORz
+  ; MIR32-NEXT:   $v13 = XXLXORz
+  ; MIR32-NEXT:   $f3 = XXLXORdpz
+  ; MIR32-NEXT:   $f4 = XXLXORdpz
+  ; MIR32-NEXT:   $f5 = XXLXORdpz
+  ; MIR32-NEXT:   $f6 = XXLXORdpz
+  ; MIR32-NEXT:   $f7 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r3 = LI 136
+  ; MIR32-NEXT:   $f8 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r4 = LI 120
+  ; MIR32-NEXT:   renamable $r5 = LWZtoc %const.0, $r2 :: (load (s32) from got)
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   $f9 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r3 = LI 104
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r4 :: (store (s128), align 8)
+  ; MIR32-NEXT:   $f10 = XXLXORdpz
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   renamable $r3 = LI 88
+  ; MIR32-NEXT:   $f11 = XXLXORdpz
+  ; MIR32-NEXT:   STXVW4X renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   renamable $r3 = LI 72
+  ; MIR32-NEXT:   renamable $v0 = LXVD2X $zero, killed renamable $r5 :: (load (s128) from constant-pool)
+  ; MIR32-NEXT:   $f12 = XXLXORdpz
+  ; MIR32-NEXT:   STXVW4X killed renamable $vsl0, $r1, killed renamable $r3 :: (store (s128), align 8)
+  ; MIR32-NEXT:   $f13 = XXLXORdpz
+  ; MIR32-NEXT:   renamable $r5 = LI 48
+  ; MIR32-NEXT:   renamable $r6 = LI 512
+  ; MIR32-NEXT:   $r3 = LI 128
+  ; MIR32-NEXT:   $r4 = LI 256
+  ; MIR32-NEXT:   STXVD2X killed renamable $v0, $r1, killed renamable $r5 :: (store (s128))
+  ; MIR32-NEXT:   STW killed renamable $r6, 152, $r1 :: (store (s32))
+  ; MIR32-NEXT:   BL_NOP <mcsymbol .callee[PR]>, csr_aix32_altivec, implicit-def dead $lr, implicit $rm, implicit $r3, implicit $r4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $r2, implicit-def $r1, implicit-def $f1
+  ; MIR32-NEXT:   ADJCALLSTACKUP 188, 0, implicit-def dead $r1, implicit $r1
+  ; MIR32-NEXT:   BLR implicit $lr, implicit $rm, implicit $f1
   ; MIR64-LABEL: name: caller
   ; MIR64: bb.0.entry:
-  ; MIR64:   renamable $x3 = LDtoc @__const.caller.t, $x2 :: (load (s64) from got)
-  ; MIR64:   renamable $x4 = LI8 16
-  ; MIR64:   renamable $vsl0 = LXVD2X renamable $x3, killed renamable $x4 :: (load (s128) from unknown-address + 16, align 8)
-  ; MIR64:   renamable $x4 = LI8 208
-  ; MIR64:   STXVD2X killed renamable $vsl0, $x1, killed renamable $x4 :: (store (s128) into unknown-address + 16, align 4)
-  ; MIR64:   renamable $vsl0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128), align 8)
-  ; MIR64:   renamable $x3 = LI8 192
-  ; MIR64:   STXVD2X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 4)
-  ; MIR64:   ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
-  ; MIR64:   renamable $vsl0 = XXLXORz
-  ; MIR64:   $f1 = XXLXORdpz
-  ; MIR64:   $f2 = XXLXORdpz
-  ; MIR64:   $v2 = XXLXORz
-  ; MIR64:   $v3 = XXLXORz
-  ; MIR64:   $v4 = XXLXORz
-  ; MIR64:   $v5 = XXLXORz
-  ; MIR64:   $v6 = XXLXORz
-  ; MIR64:   $v7 = XXLXORz
-  ; MIR64:   $v8 = XXLXORz
-  ; MIR64:   $v9 = XXLXORz
-  ; MIR64:   $v10 = XXLXORz
-  ; MIR64:   $v11 = XXLXORz
-  ; MIR64:   $v12 = XXLXORz
-  ; MIR64:   $v13 = XXLXORz
-  ; MIR64:   $f3 = XXLXORdpz
-  ; MIR64:   $f4 = XXLXORdpz
-  ; MIR64:   $f5 = XXLXORdpz
-  ; MIR64:   $f6 = XXLXORdpz
-  ; MIR64:   renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
-  ; MIR64:   $f7 = XXLXORdpz
-  ; MIR64:   $f8 = XXLXORdpz
-  ; MIR64:   renamable $x4 = LI8 160
-  ; MIR64:   $f9 = XXLXORdpz
-  ; MIR64:   renamable $x5 = LI8 144
-  ; MIR64:   STXVW4X renamable $vsl0, $x1, killed renamable $x4 :: (store (s128), align 8)
-  ; MIR64:   renamable $vsl13 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
-  ; MIR64:   $f10 = XXLXORdpz
-  ; MIR64:   renamable $x3 = LI8 128
-  ; MIR64:   STXVW4X renamable $vsl0, $x1, killed renamable $x5 :: (store (s128), align 8)
-  ; MIR64:   $f11 = XXLXORdpz
-  ; MIR64:   renamable $x4 = LI8 80
-  ; MIR64:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
-  ; MIR64:   $f12 = XXLXORdpz
-  ; MIR64:   STXVD2X killed renamable $vsl13, $x1, killed renamable $x4 :: (store (s128))
-  ; MIR64:   $f13 = XXLXORdpz
-  ; MIR64:   renamable $x5 = LI8 512
-  ; MIR64:   renamable $x6 = LI8 0
-  ; MIR64:   $x3 = LI8 128
-  ; MIR64:   $x4 = LI8 256
-  ; MIR64:   STD killed renamable $x5, 184, $x1 :: (store (s64))
-  ; MIR64:   STD killed renamable $x6, 176, $x1 :: (store (s64))
-  ; MIR64:   BL8_NOP <mcsymbol .callee[PR]>, csr_ppc64_altivec, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $f1, implicit $f2, implicit killed $v2, implicit killed $v3, implicit killed $v4, implicit killed $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $x2, implicit-def $r1, implicit-def $f1
-  ; MIR64:   ADJCALLSTACKUP 224, 0, implicit-def dead $r1, implicit $r1
-  ; MIR64:   BLR8 implicit $lr8, implicit $rm, implicit $f1
+  ; MIR64-NEXT:   renamable $x3 = LI8 2049
+  ; MIR64-NEXT:   renamable $x4 = LI8 1
+  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 51, 1
+  ; MIR64-NEXT:   renamable $x4 = RLDIC killed renamable $x4, 62, 1
+  ; MIR64-NEXT:   STD killed renamable $x3, 216, $x1 :: (store (s64) into unknown-address + 24, align 4)
+  ; MIR64-NEXT:   renamable $x3 = LI8 1023
+  ; MIR64-NEXT:   STD killed renamable $x4, 208, $x1 :: (store (s64) into unknown-address + 16, align 4)
+  ; MIR64-NEXT:   renamable $x5 = LI8 0
+  ; MIR64-NEXT:   renamable $x3 = RLDIC killed renamable $x3, 52, 2
+  ; MIR64-NEXT:   STD renamable $x5, 192, $x1 :: (store (s64), align 4)
+  ; MIR64-NEXT:   STD killed renamable $x3, 200, $x1 :: (store (s64) into unknown-address + 8, align 4)
+  ; MIR64-NEXT:   ADJCALLSTACKDOWN 224, 0, implicit-def dead $r1, implicit $r1
+  ; MIR64-NEXT:   renamable $vsl0 = XXLXORz
+  ; MIR64-NEXT:   $f1 = XXLXORdpz
+  ; MIR64-NEXT:   $f2 = XXLXORdpz
+  ; MIR64-NEXT:   $v2 = XXLXORz
+  ; MIR64-NEXT:   $v3 = XXLXORz
+  ; MIR64-NEXT:   $v4 = XXLXORz
+  ; MIR64-NEXT:   $v5 = XXLXORz
+  ; MIR64-NEXT:   $v6 = XXLXORz
+  ; MIR64-NEXT:   $v7 = XXLXORz
+  ; MIR64-NEXT:   $v8 = XXLXORz
+  ; MIR64-NEXT:   $v9 = XXLXORz
+  ; MIR64-NEXT:   $v10 = XXLXORz
+  ; MIR64-NEXT:   $v11 = XXLXORz
+  ; MIR64-NEXT:   $v12 = XXLXORz
+  ; MIR64-NEXT:   $v13 = XXLXORz
+  ; MIR64-NEXT:   $f3 = XXLXORdpz
+  ; MIR64-NEXT:   $f4 = XXLXORdpz
+  ; MIR64-NEXT:   $f5 = XXLXORdpz
+  ; MIR64-NEXT:   $f6 = XXLXORdpz
+  ; MIR64-NEXT:   $f7 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x3 = LDtocCPT %const.0, $x2 :: (load (s64) from got)
+  ; MIR64-NEXT:   $f8 = XXLXORdpz
+  ; MIR64-NEXT:   $f9 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x4 = LI8 160
+  ; MIR64-NEXT:   $f10 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x6 = LI8 144
+  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x4 :: (store (s128), align 8)
+  ; MIR64-NEXT:   renamable $v0 = LXVD2X $zero8, killed renamable $x3 :: (load (s128) from constant-pool)
+  ; MIR64-NEXT:   $f11 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x3 = LI8 128
+  ; MIR64-NEXT:   STXVW4X renamable $vsl0, $x1, killed renamable $x6 :: (store (s128), align 8)
+  ; MIR64-NEXT:   $f12 = XXLXORdpz
+  ; MIR64-NEXT:   renamable $x4 = LI8 80
+  ; MIR64-NEXT:   STXVW4X killed renamable $vsl0, $x1, killed renamable $x3 :: (store (s128), align 8)
+  ; MIR64-NEXT:   $f13 = XXLXORdpz
+  ; MIR64-NEXT:   STXVD2X killed renamable $v0, $x1, killed renamable $x4 :: (store (s128))
+  ; MIR64-NEXT:   renamable $x6 = LI8 512
+  ; MIR64-NEXT:   $x3 = LI8 128
+  ; MIR64-NEXT:   $x4 = LI8 256
+  ; MIR64-NEXT:   STD killed renamable $x6, 184, $x1 :: (store (s64))
+  ; MIR64-NEXT:   STD killed renamable $x5, 176, $x1 :: (store (s64))
+  ; MIR64-NEXT:   BL8_NOP <mcsymbol .callee[PR]>, csr_ppc64_altivec, implicit-def dead $lr8, implicit $rm, implicit $x3, implicit $x4, implicit $f1, implicit $f2, implicit $v2, implicit $v3, implicit $v4, implicit $v5, implicit killed $v6, implicit killed $v7, implicit killed $v8, implicit killed $v9, implicit killed $v10, implicit killed $v11, implicit killed $v12, implicit killed $v13, implicit $f3, implicit $f4, implicit $f5, implicit $f6, implicit $f7, implicit $f8, implicit $f9, implicit $f10, implicit $f11, implicit $f12, implicit $f13, implicit $x2, implicit-def $r1, implicit-def $f1
+  ; MIR64-NEXT:   ADJCALLSTACKUP 224, 0, implicit-def dead $r1, implicit $r1
+  ; MIR64-NEXT:   BLR8 implicit $lr8, implicit $rm, implicit $f1
   entry:
     %call = tail call double @callee(i32 signext 128, i32 signext 256, double 0.000000e+00, double 0.000000e+00, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 0.000000e+00, double 0.000000e+00>, <2 x double> <double 2.400000e+01, double 2.500000e+01>, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, double 0.000000e+00, i32 signext 512, %struct.Test* nonnull byval(%struct.Test) align 4 @__const.caller.t)
       ret double %call

diff  --git a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
index 68323970cb6e2..c21865d9684fb 100644
--- a/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
+++ b/llvm/test/CodeGen/PowerPC/aix-vec-arg-spills.ll
@@ -16,61 +16,60 @@ define double @caller() {
 ; 32BIT-NEXT:    mflr 0
 ; 32BIT-NEXT:    stw 0, 8(1)
 ; 32BIT-NEXT:    stwu 1, -192(1)
-; 32BIT-NEXT:    lwz 3, L..C0(2) # @__const.caller.t
-; 32BIT-NEXT:    li 4, 31
+; 32BIT-NEXT:    lis 3, 16392
+; 32BIT-NEXT:    lis 4, 16384
 ; 32BIT-NEXT:    xxlxor 0, 0, 0
-; 32BIT-NEXT:    lwz 5, L..C1(2) # %const.0
+; 32BIT-NEXT:    lwz 5, L..C0(2) # %const.0
 ; 32BIT-NEXT:    li 6, 512
+; 32BIT-NEXT:    stw 3, 180(1)
+; 32BIT-NEXT:    li 3, 0
 ; 32BIT-NEXT:    xxlxor 1, 1, 1
+; 32BIT-NEXT:    stw 4, 172(1)
+; 32BIT-NEXT:    lis 4, 16368
 ; 32BIT-NEXT:    xxlxor 2, 2, 2
-; 32BIT-NEXT:    lvx 2, 3, 4
-; 32BIT-NEXT:    li 4, 16
-; 32BIT-NEXT:    lvsl 4, 0, 3
-; 32BIT-NEXT:    xxlxor 37, 37, 37
-; 32BIT-NEXT:    lvx 3, 3, 4
-; 32BIT-NEXT:    li 4, 172
+; 32BIT-NEXT:    stw 3, 184(1)
+; 32BIT-NEXT:    stw 3, 176(1)
+; 32BIT-NEXT:    xxlxor 34, 34, 34
+; 32BIT-NEXT:    stw 3, 168(1)
+; 32BIT-NEXT:    stw 3, 160(1)
+; 32BIT-NEXT:    xxlxor 35, 35, 35
+; 32BIT-NEXT:    stw 3, 156(1)
+; 32BIT-NEXT:    li 3, 136
 ; 32BIT-NEXT:    lxvd2x 32, 0, 5
+; 32BIT-NEXT:    xxlxor 36, 36, 36
+; 32BIT-NEXT:    stw 4, 164(1)
+; 32BIT-NEXT:    li 4, 120
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    xxlxor 37, 37, 37
+; 32BIT-NEXT:    li 3, 104
+; 32BIT-NEXT:    stxvw4x 0, 1, 4
 ; 32BIT-NEXT:    xxlxor 38, 38, 38
-; 32BIT-NEXT:    xxlxor 39, 39, 39
 ; 32BIT-NEXT:    li 5, 48
-; 32BIT-NEXT:    vperm 2, 3, 2, 4
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 88
+; 32BIT-NEXT:    xxlxor 39, 39, 39
+; 32BIT-NEXT:    li 4, 256
 ; 32BIT-NEXT:    xxlxor 40, 40, 40
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 72
 ; 32BIT-NEXT:    xxlxor 41, 41, 41
+; 32BIT-NEXT:    stxvw4x 0, 1, 3
+; 32BIT-NEXT:    li 3, 128
 ; 32BIT-NEXT:    xxlxor 42, 42, 42
+; 32BIT-NEXT:    stxvd2x 32, 1, 5
+; 32BIT-NEXT:    stw 6, 152(1)
 ; 32BIT-NEXT:    xxlxor 43, 43, 43
 ; 32BIT-NEXT:    xxlxor 44, 44, 44
-; 32BIT-NEXT:    stxvw4x 34, 1, 4
-; 32BIT-NEXT:    li 4, 120
 ; 32BIT-NEXT:    xxlxor 45, 45, 45
-; 32BIT-NEXT:    lvx 2, 0, 3
-; 32BIT-NEXT:    li 3, 156
 ; 32BIT-NEXT:    xxlxor 3, 3, 3
 ; 32BIT-NEXT:    xxlxor 4, 4, 4
-; 32BIT-NEXT:    vperm 2, 2, 3, 4
-; 32BIT-NEXT:    xxlxor 35, 35, 35
-; 32BIT-NEXT:    xxlxor 36, 36, 36
 ; 32BIT-NEXT:    xxlxor 5, 5, 5
 ; 32BIT-NEXT:    xxlxor 6, 6, 6
 ; 32BIT-NEXT:    xxlxor 7, 7, 7
-; 32BIT-NEXT:    stxvw4x 34, 1, 3
-; 32BIT-NEXT:    li 3, 136
-; 32BIT-NEXT:    xxlxor 34, 34, 34
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 104
-; 32BIT-NEXT:    stxvw4x 0, 1, 4
-; 32BIT-NEXT:    li 4, 256
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 88
 ; 32BIT-NEXT:    xxlxor 8, 8, 8
 ; 32BIT-NEXT:    xxlxor 9, 9, 9
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 72
 ; 32BIT-NEXT:    xxlxor 10, 10, 10
-; 32BIT-NEXT:    stxvw4x 0, 1, 3
-; 32BIT-NEXT:    li 3, 128
 ; 32BIT-NEXT:    xxlxor 11, 11, 11
-; 32BIT-NEXT:    stxvd2x 32, 1, 5
-; 32BIT-NEXT:    stw 6, 152(1)
 ; 32BIT-NEXT:    xxlxor 12, 12, 12
 ; 32BIT-NEXT:    xxlxor 13, 13, 13
 ; 32BIT-NEXT:    bl .callee[PR]
@@ -85,47 +84,49 @@ define double @caller() {
 ; 64BIT-NEXT:    mflr 0
 ; 64BIT-NEXT:    std 0, 16(1)
 ; 64BIT-NEXT:    stdu 1, -224(1)
-; 64BIT-NEXT:    ld 3, L..C0(2) # @__const.caller.t
-; 64BIT-NEXT:    li 4, 16
-; 64BIT-NEXT:    li 5, 144
+; 64BIT-NEXT:    li 3, 2049
+; 64BIT-NEXT:    li 4, 1
+; 64BIT-NEXT:    xxlxor 0, 0, 0
+; 64BIT-NEXT:    li 5, 0
+; 64BIT-NEXT:    rldic 3, 3, 51, 1
+; 64BIT-NEXT:    rldic 4, 4, 62, 1
 ; 64BIT-NEXT:    xxlxor 1, 1, 1
-; 64BIT-NEXT:    li 6, 0
+; 64BIT-NEXT:    li 6, 144
+; 64BIT-NEXT:    std 3, 216(1)
+; 64BIT-NEXT:    li 3, 1023
 ; 64BIT-NEXT:    xxlxor 2, 2, 2
+; 64BIT-NEXT:    rldic 3, 3, 52, 2
+; 64BIT-NEXT:    std 4, 208(1)
+; 64BIT-NEXT:    li 4, 160
 ; 64BIT-NEXT:    xxlxor 34, 34, 34
-; 64BIT-NEXT:    lxvd2x 0, 3, 4
-; 64BIT-NEXT:    li 4, 208
+; 64BIT-NEXT:    std 3, 200(1)
+; 64BIT-NEXT:    ld 3, L..C0(2) # %const.0
+; 64BIT-NEXT:    std 5, 192(1)
 ; 64BIT-NEXT:    xxlxor 35, 35, 35
 ; 64BIT-NEXT:    xxlxor 36, 36, 36
+; 64BIT-NEXT:    stxvw4x 0, 1, 4
+; 64BIT-NEXT:    li 4, 80
 ; 64BIT-NEXT:    xxlxor 37, 37, 37
-; 64BIT-NEXT:    stxvd2x 0, 1, 4
-; 64BIT-NEXT:    li 4, 160
+; 64BIT-NEXT:    stxvw4x 0, 1, 6
+; 64BIT-NEXT:    li 6, 512
+; 64BIT-NEXT:    lxvd2x 32, 0, 3
 ; 64BIT-NEXT:    xxlxor 38, 38, 38
-; 64BIT-NEXT:    lxvd2x 0, 0, 3
-; 64BIT-NEXT:    li 3, 192
+; 64BIT-NEXT:    li 3, 128
 ; 64BIT-NEXT:    xxlxor 39, 39, 39
+; 64BIT-NEXT:    stxvw4x 0, 1, 3
 ; 64BIT-NEXT:    xxlxor 40, 40, 40
 ; 64BIT-NEXT:    xxlxor 41, 41, 41
-; 64BIT-NEXT:    stxvd2x 0, 1, 3
-; 64BIT-NEXT:    ld 3, L..C1(2) # %const.0
-; 64BIT-NEXT:    xxlxor 0, 0, 0
+; 64BIT-NEXT:    stxvd2x 32, 1, 4
+; 64BIT-NEXT:    li 4, 256
+; 64BIT-NEXT:    std 6, 184(1)
 ; 64BIT-NEXT:    xxlxor 42, 42, 42
-; 64BIT-NEXT:    stxvw4x 0, 1, 4
-; 64BIT-NEXT:    li 4, 80
+; 64BIT-NEXT:    std 5, 176(1)
 ; 64BIT-NEXT:    xxlxor 43, 43, 43
-; 64BIT-NEXT:    lxvd2x 13, 0, 3
-; 64BIT-NEXT:    li 3, 128
 ; 64BIT-NEXT:    xxlxor 44, 44, 44
-; 64BIT-NEXT:    stxvw4x 0, 1, 5
 ; 64BIT-NEXT:    xxlxor 45, 45, 45
-; 64BIT-NEXT:    stxvw4x 0, 1, 3
-; 64BIT-NEXT:    li 5, 512
 ; 64BIT-NEXT:    xxlxor 3, 3, 3
 ; 64BIT-NEXT:    xxlxor 4, 4, 4
-; 64BIT-NEXT:    stxvd2x 13, 1, 4
-; 64BIT-NEXT:    li 4, 256
-; 64BIT-NEXT:    std 5, 184(1)
 ; 64BIT-NEXT:    xxlxor 5, 5, 5
-; 64BIT-NEXT:    std 6, 176(1)
 ; 64BIT-NEXT:    xxlxor 6, 6, 6
 ; 64BIT-NEXT:    xxlxor 7, 7, 7
 ; 64BIT-NEXT:    xxlxor 8, 8, 8

diff  --git a/llvm/test/DebugInfo/COFF/types-array.ll b/llvm/test/DebugInfo/COFF/types-array.ll
index 2962f970aca14..1309328f5ebe8 100644
--- a/llvm/test/DebugInfo/COFF/types-array.ll
+++ b/llvm/test/DebugInfo/COFF/types-array.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -filetype=obj | llvm-readobj - --codeview | FileCheck %s
 ; RUN: llc < %s | llvm-mc -filetype=obj --triple=i686-windows | llvm-readobj - --codeview | FileCheck %s
 
@@ -71,9 +72,9 @@
 ; CHECK:     DefRangeFramePointerRelSym {
 ; CHECK:       Offset: -20
 ; CHECK:       LocalVariableAddrRange {
-; CHECK:         OffsetStart: .text+0x6
+; CHECK:         OffsetStart: .text+0x9
 ; CHECK:         ISectStart: 0x0
-; CHECK:         Range: 0x33
+; CHECK:         Range: 0x30
 ; CHECK:       }
 ; CHECK:     }
 ; CHECK:     ProcEnd {

diff  --git a/llvm/test/Transforms/InstCombine/memchr-5.ll b/llvm/test/Transforms/InstCombine/memchr-5.ll
index f1225ae088370..434f6641c44e7 100644
--- a/llvm/test/Transforms/InstCombine/memchr-5.ll
+++ b/llvm/test/Transforms/InstCombine/memchr-5.ll
@@ -15,85 +15,39 @@ declare i8* @memchr(i8*, i32, i64)
 
 define void @fold_memchr_a(i64* %pcmp) {
 ; BE-LABEL: @fold_memchr_a(
-; BE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 97, i64 16)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; BE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([4 x i32]* @a to i64)
-; BE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PB:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 98, i64 16)
-; BE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; BE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PC:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 99, i64 16)
-; BE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; BE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 100, i64 16)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; BE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PN:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 110, i64 16)
-; BE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; BE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PO:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 111, i64 16)
-; BE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; BE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 13, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; BE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; BE-NEXT:    [[PP:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 112, i64 16)
-; BE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; BE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([4 x i32]* @a to i64)
+; BE-NEXT:    store i64 14, i64* [[PSTOR6]], align 4
 ; BE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; BE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; BE-NEXT:    [[PQ:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 113, i64 16)
-; BE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; BE-NEXT:    store i64 15, i64* [[PSTOR7]], align 4
 ; BE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; BE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memchr_a(
-; LE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 97, i64 16)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; LE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([4 x i32]* @a to i64)
-; LE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PB:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 98, i64 16)
-; LE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; LE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PC:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 99, i64 16)
-; LE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; LE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 100, i64 16)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; LE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PN:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 110, i64 16)
-; LE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; LE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PO:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 111, i64 16)
-; LE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; LE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 14, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; LE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; LE-NEXT:    [[PP:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 112, i64 16)
-; LE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; LE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([4 x i32]* @a to i64)
+; LE-NEXT:    store i64 13, i64* [[PSTOR6]], align 4
 ; LE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; LE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; LE-NEXT:    [[PQ:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast ([4 x i32]* @a to i8*), i32 113, i64 16)
-; LE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; LE-NEXT:    store i64 12, i64* [[PSTOR7]], align 4
 ; LE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; LE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [4 x i32], [4 x i32]* @a, i64 0, i64 0
@@ -171,63 +125,31 @@ define void @fold_memchr_a(i64* %pcmp) {
 
 define void @fold_memchr_a_p1(i64* %pcmp) {
 ; BE-LABEL: @fold_memchr_a_p1(
-; BE-NEXT:    [[PE:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; BE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; BE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
-; BE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PF:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; BE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; BE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PG:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; BE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; BE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PH:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; BE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; BE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; BE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; BE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memchr_a_p1(
-; LE-NEXT:    [[PE:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; LE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; LE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
-; LE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PF:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; LE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; LE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PG:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; LE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; LE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PH:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; LE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; LE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PA:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memchr(i8* noundef nonnull dereferenceable(1) bitcast (i32* getelementptr inbounds ([4 x i32], [4 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; LE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [4 x i32], [4 x i32]* @a, i64 0, i64 1

diff  --git a/llvm/test/Transforms/InstCombine/memchr-8.ll b/llvm/test/Transforms/InstCombine/memchr-8.ll
new file mode 100644
index 0000000000000..36ddd7f5de189
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/memchr-8.ll
@@ -0,0 +1,62 @@
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+;
+; Verify that a constant with size in excess of 32-bit SIZE_MAX doesn't
+; cause trouble.  This test exercises an internal limit set arbitrarily
+; at 64K for the largest supported zeroinitializer. If the limit changes,
+; the test might need to be adjusted.
+
+declare i8* @memrchr(i8*, i32, i64)
+
+ at a = constant <{ i8, [4294967295 x i8] }> <{ i8 1, [4294967295 x i8] zeroinitializer }>
+
+; Verify reading an initializer INT32_MAX + 1 bytes large (starting at
+; offset 2147483647 into a which is UINT32_MAX bytes in size).
+
+define i8* @call_a_pi32max_p1() {
+; CHECK-LABEL: @call_a_pi32max_p1(
+; CHECK-NEXT:    [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 1, i64 2147483647), i32 0, i64 2147483647)
+; CHECK-NEXT:    ret i8* [[CHR]]
+;
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i32 2147483647
+  %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 2147483647)
+  ret i8* %chr
+}
+
+; Verify reading an initializer INT32_MAX bytes large (starting at offset
+; 2147483648 into a which is UINT32_MAX bytes in size).
+
+define i8* @call_a_pi32max() {
+; CHECK-LABEL: @call_a_pi32max(
+; CHECK-NEXT:    [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(2147483647) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 1, i64 2147483648), i32 0, i64 2147483647)
+; CHECK-NEXT:    ret i8* [[CHR]]
+;
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i64 2147483648
+  %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 2147483647)
+  ret i8* %chr
+}
+
+
+; Verify reading an initializer UINT32_MAX bytes large (starting at offset
+; 1 into a).
+
+define i8* @call_a_pui32max() {
+; CHECK-LABEL: @call_a_pui32max(
+; CHECK-NEXT:    [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(4294967295) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 1, i64 0), i32 0, i64 4294967295)
+; CHECK-NEXT:    ret i8* [[CHR]]
+;
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 1, i32 0
+  %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 4294967295)
+  ret i8* %chr
+}
+
+; Verify reading an initializer UINT32_MAX + 1 bytes large (all of a).
+
+define i8* @call_a_puimax_p1() {
+; CHECK-LABEL: @call_a_puimax_p1(
+; CHECK-NEXT:    [[CHR:%.*]] = tail call i8* @memrchr(i8* noundef nonnull dereferenceable(4294967296) getelementptr inbounds (<{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i64 0, i32 0), i32 0, i64 4294967296)
+; CHECK-NEXT:    ret i8* [[CHR]]
+;
+  %ptr = getelementptr <{ i8, [4294967295 x i8] }>, <{ i8, [4294967295 x i8] }>* @a, i32 0, i32 0
+  %chr = tail call i8* @memrchr(i8* %ptr, i32 0, i64 4294967296)
+  ret i8* %chr
+}
\ No newline at end of file

diff  --git a/llvm/test/Transforms/InstCombine/memcmp-3.ll b/llvm/test/Transforms/InstCombine/memcmp-3.ll
index 143a5a6b0950c..cafb8523f3d71 100644
--- a/llvm/test/Transforms/InstCombine/memcmp-3.ll
+++ b/llvm/test/Transforms/InstCombine/memcmp-3.ll
@@ -8,69 +8,55 @@ declare i32 @memcmp(i8*, i8*, i64)
 
 ; BE representation: { 'a', 'b', 'c', ..., 'f', 'g', 'h' }
 ; LE representation: { 'b', 'a', 'd', ..., 'e', 'h', 'g' }
-@ia6a = constant [4 x i16] [i16 24930, i16 25444, i16 25958, i16 26472]
+@ia16a = constant [4 x i16] [i16 24930, i16 25444, i16 25958, i16 26472]
 
 ; Same as the BE representation above except ending in "gg".
 @i8a = constant [8 x i8] c"abcdefgg"
 
-; Fold memcmp(ia6a, i8a, N) for N in [0, 8].
+; Fold memcmp(ia16a, i8a, N) for N in [0, 8].
 
-define void @fold_memcmp_ia6a_i8a(i32* %pcmp) {
-; BE-LABEL: @fold_memcmp_ia6a_i8a(
+define void @fold_memcmp_ia16a_i8a(i32* %pcmp) {
+; BE-LABEL: @fold_memcmp_ia16a_i8a(
 ; BE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; BE-NEXT:    store i32 0, i32* [[PSTOR1]], align 4
-; BE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 2)
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; BE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 3)
+; BE-NEXT:    store i32 0, i32* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; BE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; BE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 4)
+; BE-NEXT:    store i32 0, i32* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; BE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; BE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 5)
+; BE-NEXT:    store i32 0, i32* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; BE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; BE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 6)
+; BE-NEXT:    store i32 0, i32* [[PSTOR5]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; BE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
-; BE-NEXT:    [[CMP7:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(7) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(7) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 7)
+; BE-NEXT:    store i32 0, i32* [[PSTOR6]], align 4
 ; BE-NEXT:    [[PSTOR7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; BE-NEXT:    store i32 [[CMP7]], i32* [[PSTOR7]], align 4
-; BE-NEXT:    [[CMP8:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 8)
+; BE-NEXT:    store i32 0, i32* [[PSTOR7]], align 4
 ; BE-NEXT:    [[PSTOR8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; BE-NEXT:    store i32 [[CMP8]], i32* [[PSTOR8]], align 4
+; BE-NEXT:    store i32 1, i32* [[PSTOR8]], align 4
 ; BE-NEXT:    ret void
 ;
-; LE-LABEL: @fold_memcmp_ia6a_i8a(
+; LE-LABEL: @fold_memcmp_ia16a_i8a(
 ; LE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; LE-NEXT:    store i32 1, i32* [[PSTOR1]], align 4
-; LE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 2)
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; LE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 3)
+; LE-NEXT:    store i32 1, i32* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; LE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; LE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 4)
+; LE-NEXT:    store i32 1, i32* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; LE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; LE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 5)
+; LE-NEXT:    store i32 1, i32* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; LE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; LE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 6)
+; LE-NEXT:    store i32 1, i32* [[PSTOR5]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; LE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
-; LE-NEXT:    [[CMP7:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(7) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(7) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 7)
+; LE-NEXT:    store i32 1, i32* [[PSTOR6]], align 4
 ; LE-NEXT:    [[PSTOR7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; LE-NEXT:    store i32 [[CMP7]], i32* [[PSTOR7]], align 4
-; LE-NEXT:    [[CMP8:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 8)
+; LE-NEXT:    store i32 1, i32* [[PSTOR7]], align 4
 ; LE-NEXT:    [[PSTOR8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; LE-NEXT:    store i32 [[CMP8]], i32* [[PSTOR8]], align 4
+; LE-NEXT:    store i32 1, i32* [[PSTOR8]], align 4
 ; LE-NEXT:    ret void
 ;
-  %p0 = getelementptr [4 x i16], [4 x i16]* @ia6a, i64 0, i64 0
+  %p0 = getelementptr [4 x i16], [4 x i16]* @ia16a, i64 0, i64 0
   %p1 = bitcast i16* %p0 to i8*
   %q = getelementptr [8 x i8], [8 x i8]* @i8a, i64 0, i64 0
 
@@ -114,52 +100,42 @@ define void @fold_memcmp_ia6a_i8a(i32* %pcmp) {
 }
 
 
-; Fold memcmp(ia6a + 1, i8a + 2, N) for N in [0, 6].
+; Fold memcmp(ia16a + 1, i8a + 2, N) for N in [0, 6].
 
-define void @fold_memcmp_ia6a_p1_i8a_p1(i32* %pcmp) {
-; BE-LABEL: @fold_memcmp_ia6a_p1_i8a_p1(
+define void @fold_memcmp_ia16a_p1_i8a_p1(i32* %pcmp) {
+; BE-LABEL: @fold_memcmp_ia16a_p1_i8a_p1(
 ; BE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; BE-NEXT:    store i32 1, i32* [[PSTOR1]], align 4
-; BE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 2)
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; BE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 3)
+; BE-NEXT:    store i32 1, i32* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; BE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; BE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 4)
+; BE-NEXT:    store i32 1, i32* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; BE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; BE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 5)
+; BE-NEXT:    store i32 1, i32* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; BE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; BE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 6)
+; BE-NEXT:    store i32 1, i32* [[PSTOR5]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; BE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
+; BE-NEXT:    store i32 1, i32* [[PSTOR6]], align 4
 ; BE-NEXT:    ret void
 ;
-; LE-LABEL: @fold_memcmp_ia6a_p1_i8a_p1(
+; LE-LABEL: @fold_memcmp_ia16a_p1_i8a_p1(
 ; LE-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; LE-NEXT:    store i32 2, i32* [[PSTOR1]], align 4
-; LE-NEXT:    [[CMP2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(2) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(2) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 2)
+; LE-NEXT:    store i32 1, i32* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT:    store i32 [[CMP2]], i32* [[PSTOR2]], align 4
-; LE-NEXT:    [[CMP3:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(3) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 3)
+; LE-NEXT:    store i32 1, i32* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; LE-NEXT:    store i32 [[CMP3]], i32* [[PSTOR3]], align 4
-; LE-NEXT:    [[CMP4:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(4) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 4)
+; LE-NEXT:    store i32 1, i32* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; LE-NEXT:    store i32 [[CMP4]], i32* [[PSTOR4]], align 4
-; LE-NEXT:    [[CMP5:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(5) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(5) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 5)
+; LE-NEXT:    store i32 1, i32* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; LE-NEXT:    store i32 [[CMP5]], i32* [[PSTOR5]], align 4
-; LE-NEXT:    [[CMP6:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(6) bitcast (i16* getelementptr inbounds ([4 x i16], [4 x i16]* @ia6a, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(6) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 6)
+; LE-NEXT:    store i32 1, i32* [[PSTOR5]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; LE-NEXT:    store i32 [[CMP6]], i32* [[PSTOR6]], align 4
+; LE-NEXT:    store i32 1, i32* [[PSTOR6]], align 4
 ; LE-NEXT:    ret void
 ;
-  %p0 = getelementptr [4 x i16], [4 x i16]* @ia6a, i64 0, i64 1
+  %p0 = getelementptr [4 x i16], [4 x i16]* @ia16a, i64 0, i64 1
   %p1 = bitcast i16* %p0 to i8*
   %q = getelementptr [8 x i8], [8 x i8]* @i8a, i64 0, i64 1
 
@@ -193,61 +169,3 @@ define void @fold_memcmp_ia6a_p1_i8a_p1(i32* %pcmp) {
 
   ret void
 }
-
-
-; Don't fold calls with excessive sizes.
-
-define void @call_memcmp_too_big(i32* %pcmp) {
-; BE-LABEL: @call_memcmp_too_big(
-; BE-NEXT:    [[CMP9:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(9) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(9) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 9)
-; BE-NEXT:    store i32 [[CMP9]], i32* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[CMPM1:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(18446744073709551615) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(18446744073709551615) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 -1)
-; BE-NEXT:    [[PSTORM1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; BE-NEXT:    store i32 [[CMPM1]], i32* [[PSTORM1]], align 4
-; BE-NEXT:    [[CMPX2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 8)
-; BE-NEXT:    [[PSTORX2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT:    store i32 [[CMPX2]], i32* [[PSTORX2]], align 4
-; BE-NEXT:    [[CMPX1:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) getelementptr (i8, i8* bitcast ([4 x i16]* @ia6a to i8*), i64 1), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 8)
-; BE-NEXT:    [[PSTORX1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; BE-NEXT:    store i32 [[CMPX1]], i32* [[PSTORX1]], align 4
-; BE-NEXT:    ret void
-;
-; LE-LABEL: @call_memcmp_too_big(
-; LE-NEXT:    [[CMP9:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(9) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(9) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 9)
-; LE-NEXT:    store i32 [[CMP9]], i32* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[CMPM1:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(18446744073709551615) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(18446744073709551615) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 -1)
-; LE-NEXT:    [[PSTORM1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; LE-NEXT:    store i32 [[CMPM1]], i32* [[PSTORM1]], align 4
-; LE-NEXT:    [[CMPX2:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) bitcast ([4 x i16]* @ia6a to i8*), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 1), i64 8)
-; LE-NEXT:    [[PSTORX2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT:    store i32 [[CMPX2]], i32* [[PSTORX2]], align 4
-; LE-NEXT:    [[CMPX1:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(8) getelementptr (i8, i8* bitcast ([4 x i16]* @ia6a to i8*), i64 1), i8* noundef nonnull dereferenceable(8) getelementptr inbounds ([8 x i8], [8 x i8]* @i8a, i64 0, i64 0), i64 8)
-; LE-NEXT:    [[PSTORX1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; LE-NEXT:    store i32 [[CMPX1]], i32* [[PSTORX1]], align 4
-; LE-NEXT:    ret void
-;
-  %p0 = getelementptr [4 x i16], [4 x i16]* @ia6a, i64 0, i64 0
-  %p1 = bitcast i16* %p0 to i8*
-  %q = getelementptr [8 x i8], [8 x i8]* @i8a, i64 0, i64 0
-
-  %cmp9 = call i32 @memcmp(i8* %p1, i8* %q, i64 9)
-  %pstor9 = getelementptr i32, i32* %pcmp, i64 0
-  store i32 %cmp9, i32* %pstor9
-
-  %cmpm1 = call i32 @memcmp(i8* %p1, i8* %q, i64 -1)
-  %pstorm1 = getelementptr i32, i32* %pcmp, i64 1
-  store i32 %cmpm1, i32* %pstorm1
-
-  ; Exercise size that's greater than just one of the arrays.
-  %q2 = getelementptr [8 x i8], [8 x i8]* @i8a, i64 0, i64 1
-  %cmpx2 = call i32 @memcmp(i8* %p1, i8* %q2, i64 8)
-  %pstorx2 = getelementptr i32, i32* %pcmp, i64 2
-  store i32 %cmpx2, i32* %pstorx2
-
-  %p2 = getelementptr i8, i8* %p1, i64 1
-  %cmpx1 = call i32 @memcmp(i8* %p2, i8* %q, i64 8)
-  %pstorx1 = getelementptr i32, i32* %pcmp, i64 2
-  store i32 %cmpx1, i32* %pstorx1
-
-  ret void
-}
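
The core of the new memcmp fold, reduced to one call as a sketch (the globals @w and @b and the function @cmp_w_b are hypothetical): the i16 initializer can now be read as raw bytes, and the folded result depends on the target's byte order, which is why the checks above are split into BE and LE prefixes.

declare i32 @memcmp(i8*, i8*, i64)

; Bytes are "abcd" on big-endian targets and "badc" on little-endian ones.
@w = constant [2 x i16] [i16 24930, i16 25444]
@b = constant [4 x i8] c"abcd"

define i32 @cmp_w_b() {
  ; Can fold to 0 on big-endian targets and to a positive value on
  ; little-endian targets, where the first byte is 'b' vs. 'a'.
  %p = bitcast [2 x i16]* @w to i8*
  %q = getelementptr [4 x i8], [4 x i8]* @b, i64 0, i64 0
  %cmp = call i32 @memcmp(i8* %p, i8* %q, i64 4)
  ret i32 %cmp
}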

diff  --git a/llvm/test/Transforms/InstCombine/memcmp-4.ll b/llvm/test/Transforms/InstCombine/memcmp-4.ll
index 50c64f6e6413b..e0ed0ceaf2ef7 100644
--- a/llvm/test/Transforms/InstCombine/memcmp-4.ll
+++ b/llvm/test/Transforms/InstCombine/memcmp-4.ll
@@ -19,19 +19,15 @@ declare i32 @memcmp(i8*, i8*, i64)
 
 define void @fold_memcmp_too_big(i32* %pcmp) {
 ; BE-LABEL: @fold_memcmp_too_big(
-; BE-NEXT:    [[CMP_BC:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(12) bitcast ([5 x i16]* @ia16b to i8*), i8* noundef nonnull dereferenceable(12) bitcast ([6 x i16]* @ia16c to i8*), i64 12)
-; BE-NEXT:    store i32 [[CMP_BC]], i32* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[CMP_CB:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(12) bitcast ([6 x i16]* @ia16c to i8*), i8* noundef nonnull dereferenceable(12) bitcast ([5 x i16]* @ia16b to i8*), i64 12)
+; BE-NEXT:    store i32 -1, i32* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR_CB:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; BE-NEXT:    store i32 [[CMP_CB]], i32* [[PSTOR_CB]], align 4
+; BE-NEXT:    store i32 1, i32* [[PSTOR_CB]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memcmp_too_big(
-; LE-NEXT:    [[CMP_BC:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(12) bitcast ([5 x i16]* @ia16b to i8*), i8* noundef nonnull dereferenceable(12) bitcast ([6 x i16]* @ia16c to i8*), i64 12)
-; LE-NEXT:    store i32 [[CMP_BC]], i32* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[CMP_CB:%.*]] = call i32 @memcmp(i8* noundef nonnull dereferenceable(12) bitcast ([6 x i16]* @ia16c to i8*), i8* noundef nonnull dereferenceable(12) bitcast ([5 x i16]* @ia16b to i8*), i64 12)
+; LE-NEXT:    store i32 -1, i32* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR_CB:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
-; LE-NEXT:    store i32 [[CMP_CB]], i32* [[PSTOR_CB]], align 4
+; LE-NEXT:    store i32 1, i32* [[PSTOR_CB]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [5 x i16], [5 x i16]* @ia16b, i64 0, i64 0

diff  --git a/llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll b/llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll
index 00cde089865cc..b24e5a2193533 100644
--- a/llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll
+++ b/llvm/test/Transforms/InstCombine/memcmp-constant-fold.ll
@@ -49,15 +49,17 @@ define i1 @memcmp_4bytes_unaligned_constant_i16(i8* align 4 %x) {
   ret i1 %cmpeq0
 }
 
-; TODO: Any memcmp where all arguments are constants should be constant folded. Currently, we only handle i8 array constants.
+; Verify that a memcmp call where all arguments are constants is constant
+; folded even for arrays of types other than i8.
 
 @intbuf = private unnamed_addr constant [2 x i32] [i32 0, i32 1], align 4
 
 define i1 @memcmp_3bytes_aligned_constant_i32(i8* align 4 %x) {
-; ALL-LABEL: @memcmp_3bytes_aligned_constant_i32(
-; ALL-NEXT:    [[CALL:%.*]] = tail call i32 @memcmp(i8* noundef nonnull dereferenceable(3) bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 1) to i8*), i8* noundef nonnull dereferenceable(3) bitcast ([2 x i32]* @intbuf to i8*), i64 3)
-; ALL-NEXT:    [[CMPEQ0:%.*]] = icmp eq i32 [[CALL]], 0
-; ALL-NEXT:    ret i1 [[CMPEQ0]]
+; LE-LABEL: @memcmp_3bytes_aligned_constant_i32(
+; LE-NEXT:    ret i1 false
+;
+; BE-LABEL: @memcmp_3bytes_aligned_constant_i32(
+; BE-NEXT:    ret i1 true
 ;
   %call = tail call i32 @memcmp(i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 1) to i8*), i8* bitcast (i32* getelementptr inbounds ([2 x i32], [2 x i32]* @intbuf, i64 0, i64 0) to i8*), i64 3)
   %cmpeq0 = icmp eq i32 %call, 0
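
The new BE and LE results follow from the byte layout of @intbuf, whose value is [i32 0, i32 1]; the call compares the first 3 bytes of &intbuf[1] against the first 3 bytes of &intbuf[0]:

  little-endian: intbuf[1] = 01 00 00 00 and intbuf[0] = 00 00 00 00, so the
                 very first byte differs, memcmp is nonzero, and the icmp eq
                 folds to false
  big-endian:    intbuf[1] = 00 00 00 01 and intbuf[0] = 00 00 00 00, so the
                 first 3 bytes are zero on both sides, memcmp is 0, and the
                 icmp eq folds to true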

diff  --git a/llvm/test/Transforms/InstCombine/memrchr-5.ll b/llvm/test/Transforms/InstCombine/memrchr-5.ll
index caae72f68e3db..43156559123eb 100644
--- a/llvm/test/Transforms/InstCombine/memrchr-5.ll
+++ b/llvm/test/Transforms/InstCombine/memrchr-5.ll
@@ -15,85 +15,39 @@ declare i8* @memrchr(i8*, i32, i64)
 
 define void @fold_memrchr_a_16(i64* %pcmp) {
 ; BE-LABEL: @fold_memrchr_a_16(
-; BE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 97, i64 16)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; BE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; BE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 98, i64 16)
-; BE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; BE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 99, i64 16)
-; BE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; BE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 100, i64 16)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; BE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PN:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 110, i64 16)
-; BE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; BE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PO:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 111, i64 16)
-; BE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; BE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 13, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; BE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; BE-NEXT:    [[PP:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 112, i64 16)
-; BE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; BE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 14, i64* [[PSTOR6]], align 4
 ; BE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; BE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; BE-NEXT:    [[PQ:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 113, i64 16)
-; BE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; BE-NEXT:    store i64 15, i64* [[PSTOR7]], align 4
 ; BE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; BE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memrchr_a_16(
-; LE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 97, i64 16)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; LE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; LE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 98, i64 16)
-; LE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; LE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 99, i64 16)
-; LE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; LE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 100, i64 16)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; LE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PN:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 110, i64 16)
-; LE-NEXT:    [[IPN:%.*]] = ptrtoint i8* [[PN]] to i64
-; LE-NEXT:    [[OFFN:%.*]] = sub i64 [[IPN]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[OFFN]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PO:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 111, i64 16)
-; LE-NEXT:    [[IPO:%.*]] = ptrtoint i8* [[PO]] to i64
-; LE-NEXT:    [[OFFO:%.*]] = sub i64 [[IPO]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 14, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR6:%.*]] = getelementptr i64, i64* [[PCMP]], i64 6
-; LE-NEXT:    store i64 [[OFFO]], i64* [[PSTOR6]], align 4
-; LE-NEXT:    [[PP:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 112, i64 16)
-; LE-NEXT:    [[IPP:%.*]] = ptrtoint i8* [[PP]] to i64
-; LE-NEXT:    [[OFFP:%.*]] = sub i64 [[IPP]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 13, i64* [[PSTOR6]], align 4
 ; LE-NEXT:    [[PSTOR7:%.*]] = getelementptr i64, i64* [[PCMP]], i64 7
-; LE-NEXT:    store i64 [[OFFP]], i64* [[PSTOR7]], align 4
-; LE-NEXT:    [[PQ:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(16) bitcast ([5 x i32]* @a to i8*), i32 113, i64 16)
-; LE-NEXT:    [[IPQ:%.*]] = ptrtoint i8* [[PQ]] to i64
+; LE-NEXT:    store i64 12, i64* [[PSTOR7]], align 4
 ; LE-NEXT:    [[PSTOR8:%.*]] = getelementptr i64, i64* [[PCMP]], i64 8
-; LE-NEXT:    store i64 [[IPQ]], i64* [[PSTOR8]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR8]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 0
@@ -171,63 +125,31 @@ define void @fold_memrchr_a_16(i64* %pcmp) {
 
 define void @fold_memrchr_a_p1_16(i64* %pcmp) {
 ; BE-LABEL: @fold_memrchr_a_p1_16(
-; BE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; BE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; BE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
-; BE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PF:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; BE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; BE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 0, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PG:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; BE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; BE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 1, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PH:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; BE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; BE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; BE-NEXT:    store i64 2, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; BE-NEXT:    store i64 3, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; BE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; BE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; BE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memrchr_a_p1_16(
-; LE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 101, i64 12)
-; LE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; LE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
-; LE-NEXT:    store i64 [[OFFE]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PF:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 102, i64 12)
-; LE-NEXT:    [[IPF:%.*]] = ptrtoint i8* [[PF]] to i64
-; LE-NEXT:    [[OFFF:%.*]] = sub i64 [[IPF]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 3, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFF]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PG:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 103, i64 12)
-; LE-NEXT:    [[IPG:%.*]] = ptrtoint i8* [[PG]] to i64
-; LE-NEXT:    [[OFFG:%.*]] = sub i64 [[IPG]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 2, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFG]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PH:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 104, i64 12)
-; LE-NEXT:    [[IPH:%.*]] = ptrtoint i8* [[PH]] to i64
-; LE-NEXT:    [[OFFH:%.*]] = sub i64 [[IPH]], ptrtoint (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i64)
+; LE-NEXT:    store i64 1, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFH]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 97, i64 12)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[IPA]], i64* [[PSTOR4]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(12) bitcast (i32* getelementptr inbounds ([5 x i32], [5 x i32]* @a, i64 0, i64 1) to i8*), i32 100, i64 12)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
+; LE-NEXT:    store i64 0, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    [[PSTOR5:%.*]] = getelementptr i64, i64* [[PCMP]], i64 5
-; LE-NEXT:    store i64 [[IPD]], i64* [[PSTOR5]], align 4
+; LE-NEXT:    store i64 0, i64* [[PSTOR5]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 1
@@ -288,57 +210,27 @@ define void @fold_memrchr_a_p1_16(i64* %pcmp) {
 
 define void @fold_memrchr_a_20(i64* %pcmp) {
 ; BE-LABEL: @fold_memrchr_a_20(
-; BE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 97, i64 20)
-; BE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; BE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; BE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; BE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 98, i64 20)
-; BE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; BE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 16, i64* [[PCMP:%.*]], align 4
 ; BE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; BE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; BE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 99, i64 20)
-; BE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; BE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 17, i64* [[PSTOR1]], align 4
 ; BE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; BE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; BE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 100, i64 20)
-; BE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; BE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 18, i64* [[PSTOR2]], align 4
 ; BE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; BE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; BE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 101, i64 20)
-; BE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; BE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint ([5 x i32]* @a to i64)
+; BE-NEXT:    store i64 19, i64* [[PSTOR3]], align 4
 ; BE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; BE-NEXT:    store i64 [[OFFE]], i64* [[PSTOR4]], align 4
+; BE-NEXT:    store i64 4, i64* [[PSTOR4]], align 4
 ; BE-NEXT:    ret void
 ;
 ; LE-LABEL: @fold_memrchr_a_20(
-; LE-NEXT:    [[PA:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 97, i64 20)
-; LE-NEXT:    [[IPA:%.*]] = ptrtoint i8* [[PA]] to i64
-; LE-NEXT:    [[OFFA:%.*]] = sub i64 [[IPA]], ptrtoint ([5 x i32]* @a to i64)
-; LE-NEXT:    store i64 [[OFFA]], i64* [[PCMP:%.*]], align 4
-; LE-NEXT:    [[PB:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 98, i64 20)
-; LE-NEXT:    [[IPB:%.*]] = ptrtoint i8* [[PB]] to i64
-; LE-NEXT:    [[OFFB:%.*]] = sub i64 [[IPB]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 19, i64* [[PCMP:%.*]], align 4
 ; LE-NEXT:    [[PSTOR1:%.*]] = getelementptr i64, i64* [[PCMP]], i64 1
-; LE-NEXT:    store i64 [[OFFB]], i64* [[PSTOR1]], align 4
-; LE-NEXT:    [[PC:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 99, i64 20)
-; LE-NEXT:    [[IPC:%.*]] = ptrtoint i8* [[PC]] to i64
-; LE-NEXT:    [[OFFC:%.*]] = sub i64 [[IPC]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 18, i64* [[PSTOR1]], align 4
 ; LE-NEXT:    [[PSTOR2:%.*]] = getelementptr i64, i64* [[PCMP]], i64 2
-; LE-NEXT:    store i64 [[OFFC]], i64* [[PSTOR2]], align 4
-; LE-NEXT:    [[PD:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 100, i64 20)
-; LE-NEXT:    [[IPD:%.*]] = ptrtoint i8* [[PD]] to i64
-; LE-NEXT:    [[OFFD:%.*]] = sub i64 [[IPD]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 17, i64* [[PSTOR2]], align 4
 ; LE-NEXT:    [[PSTOR3:%.*]] = getelementptr i64, i64* [[PCMP]], i64 3
-; LE-NEXT:    store i64 [[OFFD]], i64* [[PSTOR3]], align 4
-; LE-NEXT:    [[PE:%.*]] = call i8* @memrchr(i8* noundef nonnull dereferenceable(20) bitcast ([5 x i32]* @a to i8*), i32 101, i64 20)
-; LE-NEXT:    [[IPE:%.*]] = ptrtoint i8* [[PE]] to i64
-; LE-NEXT:    [[OFFE:%.*]] = sub i64 [[IPE]], ptrtoint ([5 x i32]* @a to i64)
+; LE-NEXT:    store i64 16, i64* [[PSTOR3]], align 4
 ; LE-NEXT:    [[PSTOR4:%.*]] = getelementptr i64, i64* [[PCMP]], i64 4
-; LE-NEXT:    store i64 [[OFFE]], i64* [[PSTOR4]], align 4
+; LE-NEXT:    store i64 7, i64* [[PSTOR4]], align 4
 ; LE-NEXT:    ret void
 ;
   %p0 = getelementptr [5 x i32], [5 x i32]* @a, i64 0, i64 0

diff  --git a/llvm/test/Transforms/InstCombine/sprintf-2.ll b/llvm/test/Transforms/InstCombine/sprintf-2.ll
index 826cde9c1929f..7af255b79dee3 100644
--- a/llvm/test/Transforms/InstCombine/sprintf-2.ll
+++ b/llvm/test/Transforms/InstCombine/sprintf-2.ll
@@ -17,32 +17,23 @@ declare i32 @snprintf(i8*, i64, i8*, ...)
 
 define void @fold_snprintf_member_pC(i32* %pi) {
 ; CHECK-LABEL: @fold_snprintf_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i32 [[IA0A]], i32* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0AP1:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 1))
+; CHECK-NEXT:    store i32 1, i32* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0AP1:%.*]] = getelementptr i32, i32* [[PI]], i64 1
-; CHECK-NEXT:    store i32 [[IA0AP1]], i32* [[PIA0AP1]], align 4
-; CHECK-NEXT:    [[IA0B:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i32 0, i32* [[PIA0AP1]], align 4
 ; CHECK-NEXT:    [[PIA0B:%.*]] = getelementptr i32, i32* [[PI]], i64 2
-; CHECK-NEXT:    store i32 [[IA0B]], i32* [[PIA0B]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1))
+; CHECK-NEXT:    store i32 2, i32* [[PIA0B]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i32, i32* [[PI]], i64 3
-; CHECK-NEXT:    store i32 [[IA0BP1]], i32* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0BP2:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 2))
+; CHECK-NEXT:    store i32 1, i32* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0BP2:%.*]] = getelementptr i32, i32* [[PI]], i64 4
-; CHECK-NEXT:    store i32 [[IA0BP2]], i32* [[PIA0BP2]], align 4
-; CHECK-NEXT:    [[IA0C:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 0))
+; CHECK-NEXT:    store i32 0, i32* [[PIA0BP2]], align 4
 ; CHECK-NEXT:    [[PIA0C:%.*]] = getelementptr i32, i32* [[PI]], i64 5
-; CHECK-NEXT:    store i32 [[IA0C]], i32* [[PIA0C]], align 4
-; CHECK-NEXT:    [[IA1A:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i32 3, i32* [[PIA0C]], align 4
 ; CHECK-NEXT:    [[PIA1A:%.*]] = getelementptr i32, i32* [[PI]], i64 6
-; CHECK-NEXT:    store i32 [[IA1A]], i32* [[PIA1A]], align 4
-; CHECK-NEXT:    [[IA1B:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i32 4, i32* [[PIA1A]], align 4
 ; CHECK-NEXT:    [[PIA1B:%.*]] = getelementptr i32, i32* [[PI]], i64 7
-; CHECK-NEXT:    store i32 [[IA1B]], i32* [[PIA1B]], align 4
-; CHECK-NEXT:    [[IA1C:%.*]] = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* getelementptr inbounds ([3 x i8], [3 x i8]* @pcnt_s, i64 0, i64 0), i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 0))
+; CHECK-NEXT:    store i32 5, i32* [[PIA1B]], align 4
 ; CHECK-NEXT:    [[PIA1C:%.*]] = getelementptr i32, i32* [[PI]], i64 8
-; CHECK-NEXT:    store i32 [[IA1C]], i32* [[PIA1C]], align 4
+; CHECK-NEXT:    store i32 6, i32* [[PIA1C]], align 4
 ; CHECK-NEXT:    ret void
 ;
   %fmt = getelementptr [3 x i8], [3 x i8]* @pcnt_s, i32 0, i32 0
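
Reduced to a single call, the snprintf fold exercised above looks roughly like the sketch below (%struct.S, @fmt, @s and @len_of_member are hypothetical, with @fmt assumed to hold "%s"): the null/zero-size form of snprintf returns the length of its string argument, and that string may now be a member array of a constant struct.

%struct.S = type { i32, [4 x i8] }

@fmt = constant [3 x i8] c"%s\00"
@s = constant %struct.S { i32 0, [4 x i8] c"hi\00\00" }

declare i32 @snprintf(i8*, i64, i8*, ...)

define i32 @len_of_member() {
  %f = getelementptr [3 x i8], [3 x i8]* @fmt, i64 0, i64 0
  %p = getelementptr %struct.S, %struct.S* @s, i64 0, i32 1, i64 0
  ; Expected to fold to 2, the length of the member string "hi".
  %n = call i32 (i8*, i64, i8*, ...) @snprintf(i8* null, i64 0, i8* %f, i8* %p)
  ret i32 %n
}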

diff  --git a/llvm/test/Transforms/InstCombine/str-int-3.ll b/llvm/test/Transforms/InstCombine/str-int-3.ll
index 835e045483d17..7bce9b18c71be 100644
--- a/llvm/test/Transforms/InstCombine/str-int-3.ll
+++ b/llvm/test/Transforms/InstCombine/str-int-3.ll
@@ -20,17 +20,13 @@ declare i64 @strtoll(i8*, i8**, i32)
 
 define void @fold_atoi_member(i32* %pi) {
 ; CHECK-LABEL: @fold_atoi_member(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i32 [[IA0A]], i32* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0B:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i32 1, i32* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0B:%.*]] = getelementptr i32, i32* [[PI]], i64 1
-; CHECK-NEXT:    store i32 [[IA0B]], i32* [[PIA0B]], align 4
-; CHECK-NEXT:    [[IA1A:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i32 12, i32* [[PIA0B]], align 4
 ; CHECK-NEXT:    [[PIA1A:%.*]] = getelementptr i32, i32* [[PI]], i64 2
-; CHECK-NEXT:    store i32 [[IA1A]], i32* [[PIA1A]], align 4
-; CHECK-NEXT:    [[IA1B:%.*]] = call i32 @atoi(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i32 123, i32* [[PIA1A]], align 4
 ; CHECK-NEXT:    [[PIA1B:%.*]] = getelementptr i32, i32* [[PI]], i64 3
-; CHECK-NEXT:    store i32 [[IA1B]], i32* [[PIA1B]], align 4
+; CHECK-NEXT:    store i32 1234, i32* [[PIA1B]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold atoi(a[0].a) to 1.
@@ -94,23 +90,17 @@ define void @call_atoi_offset_out_of_bounds(i32* %pi) {
 
 define void @fold_atol_member(i64* %pi) {
 ; CHECK-LABEL: @fold_atol_member(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0B:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0B:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0B]], i64* [[PIA0B]], align 4
-; CHECK-NEXT:    [[IA0C:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 0))
+; CHECK-NEXT:    store i64 12, i64* [[PIA0B]], align 4
 ; CHECK-NEXT:    [[PIA0C:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0C]], i64* [[PIA0C]], align 4
-; CHECK-NEXT:    [[IA1A:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i64 56789, i64* [[PIA0C]], align 4
 ; CHECK-NEXT:    [[PIA1A:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1A]], i64* [[PIA1A]], align 4
-; CHECK-NEXT:    [[IA1B:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i64 123, i64* [[PIA1A]], align 4
 ; CHECK-NEXT:    [[PIA1B:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1B]], i64* [[PIA1B]], align 4
-; CHECK-NEXT:    [[IA1C:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 0))
+; CHECK-NEXT:    store i64 1234, i64* [[PIA1B]], align 4
 ; CHECK-NEXT:    [[PIA1C:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1C]], i64* [[PIA1C]], align 4
+; CHECK-NEXT:    store i64 67890, i64* [[PIA1C]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold atol(a[0].a) to 1.
@@ -158,23 +148,17 @@ define void @fold_atol_member(i64* %pi) {
 
 define void @fold_atoll_member_pC(i64* %pi) {
 ; CHECK-LABEL: @fold_atoll_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1))
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0BP1]], i64* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0CP3:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3))
+; CHECK-NEXT:    store i64 2, i64* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0CP3]], i64* [[PIA0CP3]], align 4
-; CHECK-NEXT:    [[IA1AP2:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2))
+; CHECK-NEXT:    store i64 89, i64* [[PIA0CP3]], align 4
 ; CHECK-NEXT:    [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1AP2]], i64* [[PIA1AP2]], align 4
-; CHECK-NEXT:    [[IA1BP3:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3))
+; CHECK-NEXT:    store i64 3, i64* [[PIA1AP2]], align 4
 ; CHECK-NEXT:    [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1BP3]], i64* [[PIA1BP3]], align 4
-; CHECK-NEXT:    [[IA1CP4:%.*]] = call i64 @atol(i8* getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4))
+; CHECK-NEXT:    store i64 4, i64* [[PIA1BP3]], align 4
 ; CHECK-NEXT:    [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1CP4]], i64* [[PIA1CP4]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PIA1CP4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold atoll(a[0].a) to 1.
@@ -222,23 +206,17 @@ define void @fold_atoll_member_pC(i64* %pi) {
 
 define void @fold_strtol_member_pC(i64* %pi) {
 ; CHECK-LABEL: @fold_strtol_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0), i8** null, i32 0)
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1), i8** null, i32 0)
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0BP1]], i64* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0CP3:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 2, i64* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0CP3]], i64* [[PIA0CP3]], align 4
-; CHECK-NEXT:    [[IA1AP2:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2), i8** null, i32 0)
+; CHECK-NEXT:    store i64 89, i64* [[PIA0CP3]], align 4
 ; CHECK-NEXT:    [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1AP2]], i64* [[PIA1AP2]], align 4
-; CHECK-NEXT:    [[IA1BP3:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 3, i64* [[PIA1AP2]], align 4
 ; CHECK-NEXT:    [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1BP3]], i64* [[PIA1BP3]], align 4
-; CHECK-NEXT:    [[IA1CP4:%.*]] = call i64 @strtol(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4), i8** null, i32 0)
+; CHECK-NEXT:    store i64 4, i64* [[PIA1BP3]], align 4
 ; CHECK-NEXT:    [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1CP4]], i64* [[PIA1CP4]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PIA1CP4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold strtol(a[0].a, 0, 0) to 1.
@@ -286,23 +264,17 @@ define void @fold_strtol_member_pC(i64* %pi) {
 
 define void @fold_strtoll_member_pC(i64* %pi) {
 ; CHECK-LABEL: @fold_strtoll_member_pC(
-; CHECK-NEXT:    [[IA0A:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0), i8** null, i32 0)
-; CHECK-NEXT:    store i64 [[IA0A]], i64* [[PI:%.*]], align 4
-; CHECK-NEXT:    [[IA0BP1:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1), i8** null, i32 0)
+; CHECK-NEXT:    store i64 1, i64* [[PI:%.*]], align 4
 ; CHECK-NEXT:    [[PIA0BP1:%.*]] = getelementptr i64, i64* [[PI]], i64 1
-; CHECK-NEXT:    store i64 [[IA0BP1]], i64* [[PIA0BP1]], align 4
-; CHECK-NEXT:    [[IA0CP3:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 2, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 2, i64* [[PIA0BP1]], align 4
 ; CHECK-NEXT:    [[PIA0CP3:%.*]] = getelementptr i64, i64* [[PI]], i64 2
-; CHECK-NEXT:    store i64 [[IA0CP3]], i64* [[PIA0CP3]], align 4
-; CHECK-NEXT:    [[IA1AP2:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2), i8** null, i32 0)
+; CHECK-NEXT:    store i64 89, i64* [[PIA0CP3]], align 4
 ; CHECK-NEXT:    [[PIA1AP2:%.*]] = getelementptr i64, i64* [[PI]], i64 3
-; CHECK-NEXT:    store i64 [[IA1AP2]], i64* [[PIA1AP2]], align 4
-; CHECK-NEXT:    [[IA1BP3:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3), i8** null, i32 0)
+; CHECK-NEXT:    store i64 3, i64* [[PIA1AP2]], align 4
 ; CHECK-NEXT:    [[PIA1BP3:%.*]] = getelementptr i64, i64* [[PI]], i64 4
-; CHECK-NEXT:    store i64 [[IA1BP3]], i64* [[PIA1BP3]], align 4
-; CHECK-NEXT:    [[IA1CP4:%.*]] = call i64 @strtoll(i8* nocapture getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 2, i64 4), i8** null, i32 0)
+; CHECK-NEXT:    store i64 4, i64* [[PIA1BP3]], align 4
 ; CHECK-NEXT:    [[PIA1CP4:%.*]] = getelementptr i64, i64* [[PI]], i64 5
-; CHECK-NEXT:    store i64 [[IA1CP4]], i64* [[PIA1CP4]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PIA1CP4]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold strtoll(a[0].a, 0, 0) to 1.
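
The same pattern for the string-to-integer folds, as a standalone sketch (%struct.D, @d and @parse_member are hypothetical names): the digit string lives in a member array of a constant struct rather than in a top-level i8 array.

%struct.D = type { [4 x i8], [4 x i8] }

@d = constant %struct.D { [4 x i8] c"12\00\00", [4 x i8] c"345\00" }

declare i32 @atoi(i8*)

define i32 @parse_member() {
  ; Reads the digits "345" from the second member and is expected to fold
  ; to the constant 345.
  %p = getelementptr %struct.D, %struct.D* @d, i64 0, i32 1, i64 0
  %r = call i32 @atoi(i8* %p)
  ret i32 %r
}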

diff  --git a/llvm/test/Transforms/InstCombine/strcmp-3.ll b/llvm/test/Transforms/InstCombine/strcmp-3.ll
index 499fed31ec396..166d3d55fa026 100644
--- a/llvm/test/Transforms/InstCombine/strcmp-3.ll
+++ b/llvm/test/Transforms/InstCombine/strcmp-3.ll
@@ -12,8 +12,7 @@ declare i32 @strcmp(i8*, i8*)
 
 define i32 @fold_strcmp_a5i0_a5i1_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_a5i1_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 0
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0
@@ -28,7 +27,7 @@ define i32 @fold_strcmp_a5i0_a5i1_to_0() {
 define i32 @call_strcmp_a5i0_a5iI(i64 %I) {
 ; CHECK-LABEL: @call_strcmp_a5i0_a5iI(
 ; CHECK-NEXT:    [[Q:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 [[I:%.*]], i64 0
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
+; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
 ; CHECK-NEXT:    ret i32 [[CMP]]
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
@@ -44,7 +43,7 @@ define i32 @call_strcmp_a5i0_a5iI(i64 %I) {
 define i32 @call_strcmp_a5iI_a5i0(i64 %I) {
 ; CHECK-LABEL: @call_strcmp_a5iI_a5i0(
 ; CHECK-NEXT:    [[P:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 [[I:%.*]], i64 0
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) [[P]], i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0))
+; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) [[P]], i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0))
 ; CHECK-NEXT:    ret i32 [[CMP]]
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 %I, i64 0
@@ -59,8 +58,7 @@ define i32 @call_strcmp_a5iI_a5i0(i64 %I) {
 
 define i32 @fold_strcmp_a5i0_a5i1_p1_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_a5i1_p1_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 1))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 -1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 1
@@ -75,7 +73,7 @@ define i32 @fold_strcmp_a5i0_a5i1_p1_to_0() {
 define i32 @call_strcmp_a5i0_a5i1_pI(i64 %I) {
 ; CHECK-LABEL: @call_strcmp_a5i0_a5i1_pI(
 ; CHECK-NEXT:    [[Q:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 [[I:%.*]]
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
+; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(4) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) [[Q]])
 ; CHECK-NEXT:    ret i32 [[CMP]]
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
@@ -90,8 +88,7 @@ define i32 @call_strcmp_a5i0_a5i1_pI(i64 %I) {
 
 define i32 @fold_strcmp_a5i0_p1_a5i1_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_p1_a5i1_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 1), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 1
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 1, i64 0
@@ -105,8 +102,7 @@ define i32 @fold_strcmp_a5i0_p1_a5i1_to_0() {
 
 define i32 @fold_strcmp_a5i0_a5i2_to_0() {
 ; CHECK-LABEL: @fold_strcmp_a5i0_a5i2_to_0(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0
@@ -120,8 +116,7 @@ define i32 @fold_strcmp_a5i0_a5i2_to_0() {
 
 define i32 @fold_strcmp_a5i2_a5i0_to_m1() {
 ; CHECK-LABEL: @fold_strcmp_a5i2_a5i0_to_m1(
-; CHECK-NEXT:    [[CMP:%.*]] = call i32 @strcmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0))
-; CHECK-NEXT:    ret i32 [[CMP]]
+; CHECK-NEXT:    ret i32 -1
 ;
   %p = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 0, i64 0
   %q = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5, i64 0, i64 2, i64 0

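At the source level, the strcmp folds above amount to comparing rows of a constant
two-dimensional char array. A minimal C sketch, assuming hypothetical contents for
@a5 (its real initializer is defined outside the hunks shown here):

  #include <string.h>

  /* Hypothetical stand-in for @a5; rows 0 and 1 hold identical strings. */
  static const char a5[5][4] = { "12", "12", "1", "", "" };

  int cmp_rows(void) {
    /* Both operands are inner arrays (subobjects) of the constant outer
       array, so the comparison can now be folded to 0 at compile time. */
    return strcmp(a5[0], a5[1]);
  }
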
diff  --git a/llvm/test/Transforms/InstCombine/strlen-5.ll b/llvm/test/Transforms/InstCombine/strlen-5.ll
index db8a10d82de24..ea9f2bb48726f 100644
--- a/llvm/test/Transforms/InstCombine/strlen-5.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-5.ll
@@ -12,8 +12,7 @@ declare i64 @strlen(i8*)
 
 define i64 @fold_a5_4_i0_to_3() {
 ; CHECK-LABEL: @fold_a5_4_i0_to_3(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 3
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -25,8 +24,7 @@ define i64 @fold_a5_4_i0_to_3() {
 
 define i64 @fold_a5_4_i0_p1_to_2() {
 ; CHECK-LABEL: @fold_a5_4_i0_p1_to_2(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 2
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -38,8 +36,7 @@ define i64 @fold_a5_4_i0_p1_to_2() {
 
 define i64 @fold_a5_4_i0_p2_to_1() {
 ; CHECK-LABEL: @fold_a5_4_i0_p2_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -51,8 +48,7 @@ define i64 @fold_a5_4_i0_p2_to_1() {
 
 define i64 @fold_a5_4_i0_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i0_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 0, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -64,8 +60,7 @@ define i64 @fold_a5_4_i0_p3_to_0() {
 
 define i64 @fold_a5_4_i1_to_2() {
 ; CHECK-LABEL: @fold_a5_4_i1_to_2(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 2
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -77,8 +72,7 @@ define i64 @fold_a5_4_i1_to_2() {
 
 define i64 @fold_a5_4_i1_p1_to_1() {
 ; CHECK-LABEL: @fold_a5_4_i1_p1_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -90,8 +84,7 @@ define i64 @fold_a5_4_i1_p1_to_1() {
 
 define i64 @fold_a5_4_i1_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i1_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -103,8 +96,7 @@ define i64 @fold_a5_4_i1_p2_to_0() {
 
 define i64 @fold_a5_4_i1_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i1_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 1, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -116,8 +108,7 @@ define i64 @fold_a5_4_i1_p3_to_0() {
 
 define i64 @fold_a5_4_i2_to_1() {
 ; CHECK-LABEL: @fold_a5_4_i2_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -129,8 +120,7 @@ define i64 @fold_a5_4_i2_to_1() {
 
 define i64 @fold_a5_4_i2_p1_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i2_p1_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -142,8 +132,7 @@ define i64 @fold_a5_4_i2_p1_to_0() {
 
 define i64 @fold_a5_4_i2_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i2_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -155,8 +144,7 @@ define i64 @fold_a5_4_i2_p2_to_0() {
 
 define i64 @fold_a5_4_i2_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i2_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -168,8 +156,7 @@ define i64 @fold_a5_4_i2_p3_to_0() {
 
 define i64 @fold_a5_4_i3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -181,8 +168,7 @@ define i64 @fold_a5_4_i3_to_0() {
 
 define i64 @fold_a5_4_i3_p1_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i3_p1_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -194,8 +180,7 @@ define i64 @fold_a5_4_i3_p1_to_0() {
 
 define i64 @fold_a5_4_i3_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i3_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -207,8 +192,7 @@ define i64 @fold_a5_4_i3_p2_to_0() {
 
 define i64 @fold_a5_3_i4_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_3_i4_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 3, i64 3
   %len = call i64 @strlen(i8* %ptr)
@@ -220,8 +204,7 @@ define i64 @fold_a5_3_i4_p3_to_0() {
 
 define i64 @fold_a5_4_i4_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 0
   %len = call i64 @strlen(i8* %ptr)
@@ -233,8 +216,7 @@ define i64 @fold_a5_4_i4_to_0() {
 
 define i64 @fold_a5_4_i4_p1_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_p1_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 1
   %len = call i64 @strlen(i8* %ptr)
@@ -246,8 +228,7 @@ define i64 @fold_a5_4_i4_p1_to_0() {
 
 define i64 @fold_a5_4_i4_p2_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_p2_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 2
   %len = call i64 @strlen(i8* %ptr)
@@ -259,8 +240,7 @@ define i64 @fold_a5_4_i4_p2_to_0() {
 
 define i64 @fold_a5_4_i4_p3_to_0() {
 ; CHECK-LABEL: @fold_a5_4_i4_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 4, i64 3
   %len = call i64 @strlen(i8* %ptr)

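The strlen-5.ll changes above fold strlen of each row of a constant [5 x [4 x i8]]
at fixed offsets. Roughly the C shape, with a made-up initializer whose row lengths
match the expected results (the test's real @a5_4 is defined elsewhere in the file):

  #include <string.h>

  /* Assumed contents: rows of length 3, 2, 1, 0 and 0. */
  static const char a5_4[5][4] = { "123", "12", "1", "", "" };

  size_t sum_row_lengths(void) {
    /* Every argument points into a subobject of the constant aggregate,
       so each call folds and the sum reduces to 3 + 2 + 1 + 0 + 0. */
    return strlen(a5_4[0]) + strlen(a5_4[1]) + strlen(a5_4[2])
         + strlen(a5_4[3]) + strlen(a5_4[4]);
  }
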
diff  --git a/llvm/test/Transforms/InstCombine/strlen-6.ll b/llvm/test/Transforms/InstCombine/strlen-6.ll
index 42c755260b15b..2ccd4f3ae5a2c 100644
--- a/llvm/test/Transforms/InstCombine/strlen-6.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-6.ll
@@ -31,8 +31,7 @@ declare i64 @strlen(i8*)
 
 define i64 @fold_strlen_a_S3_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_S3_to_3(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 3
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 0
   %len = call i64 @strlen(i8* %ptr)
@@ -44,8 +43,7 @@ define i64 @fold_strlen_a_S3_to_3() {
 
 define i64 @fold_strlen_a_S3_p1_to_2() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p1_to_2(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 1))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 2
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 1
   %len = call i64 @strlen(i8* %ptr)
@@ -57,8 +55,7 @@ define i64 @fold_strlen_a_S3_p1_to_2() {
 
 define i64 @fold_strlen_a_S3_p2_to_1() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p2_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 2
   %len = call i64 @strlen(i8* %ptr)
@@ -70,8 +67,7 @@ define i64 @fold_strlen_a_S3_p2_to_1() {
 
 define i64 @fold_strlen_a_S3_p3_to_0() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p3_to_0(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4:%.*]], %struct.A_a4* @a_s3, i64 0, i32 0, i64 3))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 0
 ;
   %ptr = getelementptr %struct.A_a4, %struct.A_a4* @a_s3, i32 0, i32 0, i32 3
   %len = call i64 @strlen(i8* %ptr)
@@ -83,8 +79,7 @@ define i64 @fold_strlen_a_S3_p3_to_0() {
 
 define i64 @fold_strlen_a_S3_s4_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_S3_s4_to_3(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 3
 ;
   %ptr = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 0
   %len = call i64 @strlen(i8* %ptr)
@@ -96,8 +91,7 @@ define i64 @fold_strlen_a_S3_s4_to_3() {
 
 define i64 @fold_strlen_a_S3_p2_s4_to_1() {
 ; CHECK-LABEL: @fold_strlen_a_S3_p2_s4_to_1(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 0, i64 2))
-; CHECK-NEXT:    ret i64 [[LEN]]
+; CHECK-NEXT:    ret i64 1
 ;
   %ptr = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 2
   %len = call i64 @strlen(i8* %ptr)
@@ -110,10 +104,8 @@ define i64 @fold_strlen_a_S3_p2_s4_to_1() {
 
 define void @fold_strlen_a_s3_S4_to_4() {
 ; CHECK-LABEL: @fold_strlen_a_s3_S4_to_4(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 4
@@ -135,10 +127,8 @@ define void @fold_strlen_a_s3_S4_to_4() {
 
 define void @fold_strlen_a_s3_S4_p1_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_s3_S4_p1_to_3(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5:%.*]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 1))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_A5]], %struct.A_a4_a5* @a_s3_s4, i64 0, i32 1, i64 1))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_a5, %struct.A_a4_a5* @a_s3_s4, i32 0, i32 0, i32 5
@@ -160,10 +150,8 @@ define void @fold_strlen_a_s3_S4_p1_to_3() {
 
 define void @fold_strlen_a_s3_i32_S4_to_4() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_to_4(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 4, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 8
@@ -185,10 +173,8 @@ define void @fold_strlen_a_s3_i32_S4_to_4() {
 
 define void @fold_strlen_a_s3_i32_S4_p1_to_3() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p1_to_3(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 1))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 9
@@ -210,10 +196,8 @@ define void @fold_strlen_a_s3_i32_S4_p1_to_3() {
 
 define void @fold_strlen_a_s3_i32_S4_p2_to_2() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p2_to_2(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 2))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 2))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 2, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 2, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 10
@@ -235,10 +219,8 @@ define void @fold_strlen_a_s3_i32_S4_p2_to_2() {
 
 define void @fold_strlen_a_s3_i32_S4_p3_to_1() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p3_to_1(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 3))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 3))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 1, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 1, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 11
@@ -260,10 +242,8 @@ define void @fold_strlen_a_s3_i32_S4_p3_to_1() {
 
 define void @fold_strlen_a_s3_i32_S4_p4_to_0() {
 ; CHECK-LABEL: @fold_strlen_a_s3_i32_S4_p4_to_0(
-; CHECK-NEXT:    [[LEN1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5:%.*]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 4))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A_A4_I32_A5]], %struct.A_a4_i32_a5* @a_s3_i32_s4, i64 0, i32 2, i64 4))
-; CHECK-NEXT:    store i64 [[LEN1]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 0, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 0, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
 ; CHECK-NEXT:    ret void
 ;
   %p1 = getelementptr %struct.A_a4_i32_a5, %struct.A_a4_i32_a5* @a_s3_i32_s4, i32 0, i32 0, i32 12
@@ -285,12 +265,9 @@ define void @fold_strlen_a_s3_i32_S4_p4_to_0() {
 
 define void @fold_strlen_ax_s() {
 ; CHECK-LABEL: @fold_strlen_ax_s(
-; CHECK-NEXT:    [[LEN3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ({ i8, [4 x i8] }, { i8, [4 x i8] }* @ax_s3, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN3]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
-; CHECK-NEXT:    [[LEN5:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ({ i16, [6 x i8] }, { i16, [6 x i8] }* @ax_s5, i64 0, i32 1, i64 0))
-; CHECK-NEXT:    store i64 [[LEN5]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
-; CHECK-NEXT:    [[LEN7:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ({ i32, i32, [8 x i8] }, { i32, i32, [8 x i8] }* @ax_s7, i64 0, i32 2, i64 0))
-; CHECK-NEXT:    store i64 [[LEN7]], i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 2), align 4
+; CHECK-NEXT:    store i64 3, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 0), align 4
+; CHECK-NEXT:    store i64 5, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 1), align 4
+; CHECK-NEXT:    store i64 7, i64* getelementptr inbounds ([0 x i64], [0 x i64]* @ax, i64 0, i64 2), align 4
 ; CHECK-NEXT:    ret void
 ;
   %pax_s3 = getelementptr { i8, [4 x i8] }, { i8, [4 x i8] }* @ax_s3, i64 0, i32 1, i64 0

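strlen-6.ll exercises the same fold when the character array is a member of a
constant struct rather than a top-level array. A hypothetical C equivalent (the
member layout and initializer below are assumptions, not the test's actual
definitions):

  #include <string.h>

  /* Stand-in for the test's %struct.A_a4_a5 / @a_s3_s4. */
  struct A_a4_a5 { char a[4]; char b[5]; };
  static const struct A_a4_a5 a_s3_s4 = { "123", "1234" };

  size_t member_lengths(void) {
    /* Both arguments address member arrays of a constant aggregate; with
       this change the calls fold to 3 and 4 respectively. */
    return strlen(a_s3_s4.a) + strlen(a_s3_s4.b);
  }
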
diff  --git a/llvm/test/Transforms/InstCombine/strlen-7.ll b/llvm/test/Transforms/InstCombine/strlen-7.ll
index 61801770d4831..14486a8793d85 100644
--- a/llvm/test/Transforms/InstCombine/strlen-7.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-7.ll
@@ -15,59 +15,41 @@ declare i64 @strlen(i8*)
 
 define void @fold_strlen_A(i64* %plen) {
 ; CHECK-LABEL: @fold_strlen_A(
-; CHECK-NEXT:    [[LENA0A:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 0))
-; CHECK-NEXT:    store i64 [[LENA0A]], i64* [[PLEN:%.*]], align 4
-; CHECK-NEXT:    [[LENA0AP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 1))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN:%.*]], align 4
 ; CHECK-NEXT:    [[PLEN1:%.*]] = getelementptr i64, i64* [[PLEN]], i64 1
-; CHECK-NEXT:    store i64 [[LENA0AP1]], i64* [[PLEN1]], align 4
-; CHECK-NEXT:    [[LENA0AP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 2))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN1]], align 4
 ; CHECK-NEXT:    [[PLEN2:%.*]] = getelementptr i64, i64* [[PLEN]], i64 2
-; CHECK-NEXT:    store i64 [[LENA0AP2]], i64* [[PLEN2]], align 4
-; CHECK-NEXT:    [[LENA0AP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 0, i64 3))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN2]], align 4
 ; CHECK-NEXT:    [[PLEN3:%.*]] = getelementptr i64, i64* [[PLEN]], i64 3
-; CHECK-NEXT:    store i64 [[LENA0AP3]], i64* [[PLEN3]], align 4
-; CHECK-NEXT:    [[LENA0B:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 0))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN3]], align 4
 ; CHECK-NEXT:    [[PLEN4:%.*]] = getelementptr i64, i64* [[PLEN]], i64 4
-; CHECK-NEXT:    store i64 [[LENA0B]], i64* [[PLEN4]], align 4
-; CHECK-NEXT:    [[LENA0BP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 1))
+; CHECK-NEXT:    store i64 2, i64* [[PLEN4]], align 4
 ; CHECK-NEXT:    [[PLEN5:%.*]] = getelementptr i64, i64* [[PLEN]], i64 5
-; CHECK-NEXT:    store i64 [[LENA0BP1]], i64* [[PLEN5]], align 4
-; CHECK-NEXT:    [[LENA0BP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 2))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN5]], align 4
 ; CHECK-NEXT:    [[PLEN6:%.*]] = getelementptr i64, i64* [[PLEN]], i64 6
-; CHECK-NEXT:    store i64 [[LENA0BP2]], i64* [[PLEN6]], align 4
-; CHECK-NEXT:    [[LENA0BP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 3))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN6]], align 4
 ; CHECK-NEXT:    [[PLEN7:%.*]] = getelementptr i64, i64* [[PLEN]], i64 7
-; CHECK-NEXT:    store i64 [[LENA0BP3]], i64* [[PLEN7]], align 4
-; CHECK-NEXT:    [[LENA0BP4:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 0, i32 1, i64 4))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN7]], align 4
 ; CHECK-NEXT:    [[PLEN8:%.*]] = getelementptr i64, i64* [[PLEN]], i64 8
-; CHECK-NEXT:    store i64 [[LENA0BP4]], i64* [[PLEN8]], align 4
-; CHECK-NEXT:    [[LENA1A:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 0))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN8]], align 4
 ; CHECK-NEXT:    [[PLEN9:%.*]] = getelementptr i64, i64* [[PLEN]], i64 9
-; CHECK-NEXT:    store i64 [[LENA1A]], i64* [[PLEN9]], align 4
-; CHECK-NEXT:    [[LENA1AP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 1))
+; CHECK-NEXT:    store i64 3, i64* [[PLEN9]], align 4
 ; CHECK-NEXT:    [[PLEN10:%.*]] = getelementptr i64, i64* [[PLEN]], i64 10
-; CHECK-NEXT:    store i64 [[LENA1AP1]], i64* [[PLEN10]], align 4
-; CHECK-NEXT:    [[LENA1AP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 2))
+; CHECK-NEXT:    store i64 2, i64* [[PLEN10]], align 4
 ; CHECK-NEXT:    [[PLEN11:%.*]] = getelementptr i64, i64* [[PLEN]], i64 11
-; CHECK-NEXT:    store i64 [[LENA1AP2]], i64* [[PLEN11]], align 4
-; CHECK-NEXT:    [[LENA1AP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 0, i64 3))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN11]], align 4
 ; CHECK-NEXT:    [[PLEN12:%.*]] = getelementptr i64, i64* [[PLEN]], i64 12
-; CHECK-NEXT:    store i64 [[LENA1AP3]], i64* [[PLEN12]], align 4
-; CHECK-NEXT:    [[LENA1B:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 0))
+; CHECK-NEXT:    store i64 0, i64* [[PLEN12]], align 4
 ; CHECK-NEXT:    [[PLEN14:%.*]] = getelementptr i64, i64* [[PLEN]], i64 14
-; CHECK-NEXT:    store i64 [[LENA1B]], i64* [[PLEN14]], align 4
-; CHECK-NEXT:    [[LENA1BP1:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 1))
+; CHECK-NEXT:    store i64 4, i64* [[PLEN14]], align 4
 ; CHECK-NEXT:    [[PLEN15:%.*]] = getelementptr i64, i64* [[PLEN]], i64 15
-; CHECK-NEXT:    store i64 [[LENA1BP1]], i64* [[PLEN15]], align 4
-; CHECK-NEXT:    [[LENA1BP2:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 2))
+; CHECK-NEXT:    store i64 3, i64* [[PLEN15]], align 4
 ; CHECK-NEXT:    [[PLEN16:%.*]] = getelementptr i64, i64* [[PLEN]], i64 16
-; CHECK-NEXT:    store i64 [[LENA1BP2]], i64* [[PLEN16]], align 4
-; CHECK-NEXT:    [[LENA1BP3:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 3))
+; CHECK-NEXT:    store i64 2, i64* [[PLEN16]], align 4
 ; CHECK-NEXT:    [[PLEN17:%.*]] = getelementptr i64, i64* [[PLEN]], i64 17
-; CHECK-NEXT:    store i64 [[LENA1BP3]], i64* [[PLEN17]], align 4
-; CHECK-NEXT:    [[LENA1BP4:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([2 x %struct.A], [2 x %struct.A]* @a, i64 0, i64 1, i32 1, i64 4))
+; CHECK-NEXT:    store i64 1, i64* [[PLEN17]], align 4
 ; CHECK-NEXT:    [[PLEN18:%.*]] = getelementptr i64, i64* [[PLEN]], i64 18
-; CHECK-NEXT:    store i64 [[LENA1BP4]], i64* [[PLEN18]], align 4
+; CHECK-NEXT:    store i64 0, i64* [[PLEN18]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; Fold strlen(a[0].a) to 1.

diff  --git a/llvm/test/Transforms/InstCombine/strlen-8.ll b/llvm/test/Transforms/InstCombine/strlen-8.ll
index fc218626e81b6..a59d6ab51d371 100644
--- a/llvm/test/Transforms/InstCombine/strlen-8.ll
+++ b/llvm/test/Transforms/InstCombine/strlen-8.ll
@@ -2,6 +2,9 @@
 ; Verify that strlen calls with variable offsets into elements of constant
 ; arrays are folded.
 ;
+; TODO: None of these cases is folded at the moment due to a limitation
+; in LibCallSimplifier::optimizeStringLength.
+;
 ; RUN: opt < %s -passes=instcombine -S | FileCheck %s
 
 declare i64 @strlen(i8*)
@@ -41,10 +44,11 @@ define i64 @fold_a5_4_i1_pI(i64 %I) {
 
 define i64 @fold_a5_4_i2_pI(i64 %I) {
 ; CHECK-LABEL: @fold_a5_4_i2_pI(
-; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1))
+; CHECK-NEXT:    [[PTR:%.*]] = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 [[I:%.*]]
+; CHECK-NEXT:    [[LEN:%.*]] = call i64 @strlen(i8* noundef nonnull dereferenceable(1) [[PTR]])
 ; CHECK-NEXT:    ret i64 [[LEN]]
 ;
-  %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 1
+  %ptr = getelementptr [5 x [4 x i8]], [5 x [4 x i8]]* @a5_4, i64 0, i64 2, i64 %I
   %len = call i64 @strlen(i8* %ptr)
   ret i64 %len
 }

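As the TODO added to strlen-8.ll notes, the variable-offset form is still not
folded: when the index into the row is not a constant,
LibCallSimplifier::optimizeStringLength keeps the call. In C terms, reusing the
hypothetical a5_4 contents from above:

  #include <string.h>

  /* Hypothetical contents; the real @a5_4 is defined in the test file. */
  static const char a5_4[5][4] = { "123", "12", "1", "", "" };

  size_t row2_len_at(size_t i) {
    /* Constant data but a runtime offset: this remains a real strlen call
       for now, even though each possible result is known. */
    return strlen(&a5_4[2][0] + i);   /* in bounds for i <= 3 */
  }
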
diff  --git a/llvm/test/Transforms/InstCombine/strncmp-4.ll b/llvm/test/Transforms/InstCombine/strncmp-4.ll
index fc02bb7e48902..6d1348de68354 100644
--- a/llvm/test/Transforms/InstCombine/strncmp-4.ll
+++ b/llvm/test/Transforms/InstCombine/strncmp-4.ll
@@ -15,30 +15,22 @@ define void @fold_strncmp_Aa_b(i32* %pcmp) {
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; CHECK-NEXT:    [[PCMP1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP1]], align 4
-; CHECK-NEXT:    [[CMP2:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A:%.*]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 2)
 ; CHECK-NEXT:    [[PCMP2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT:    store i32 [[CMP2]], i32* [[PCMP2]], align 4
-; CHECK-NEXT:    [[CMP3:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 3)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP2]], align 4
 ; CHECK-NEXT:    [[PCMP3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT:    store i32 [[CMP3]], i32* [[PCMP3]], align 4
-; CHECK-NEXT:    [[CMP4:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 4)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP3]], align 4
 ; CHECK-NEXT:    [[PCMP4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT:    store i32 [[CMP4]], i32* [[PCMP4]], align 4
-; CHECK-NEXT:    [[CMP5:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 5)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP4]], align 4
 ; CHECK-NEXT:    [[PCMP5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT:    store i32 [[CMP5]], i32* [[PCMP5]], align 4
-; CHECK-NEXT:    [[CMP6:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 6)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP5]], align 4
 ; CHECK-NEXT:    [[PCMP6:%.*]] = getelementptr i32, i32* [[PCMP]], i64 6
-; CHECK-NEXT:    store i32 [[CMP6]], i32* [[PCMP6]], align 4
-; CHECK-NEXT:    [[CMP7:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 7)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP6]], align 4
 ; CHECK-NEXT:    [[PCMP7:%.*]] = getelementptr i32, i32* [[PCMP]], i64 7
-; CHECK-NEXT:    store i32 [[CMP7]], i32* [[PCMP7]], align 4
-; CHECK-NEXT:    [[CMP8:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 8)
+; CHECK-NEXT:    store i32 -1, i32* [[PCMP7]], align 4
 ; CHECK-NEXT:    [[PCMP8:%.*]] = getelementptr i32, i32* [[PCMP]], i64 8
-; CHECK-NEXT:    store i32 [[CMP8]], i32* [[PCMP8]], align 4
-; CHECK-NEXT:    [[CMP9:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 0), i64 9)
+; CHECK-NEXT:    store i32 -1, i32* [[PCMP8]], align 4
 ; CHECK-NEXT:    [[PCMP9:%.*]] = getelementptr i32, i32* [[PCMP]], i64 9
-; CHECK-NEXT:    store i32 [[CMP9]], i32* [[PCMP9]], align 4
+; CHECK-NEXT:    store i32 -1, i32* [[PCMP9]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; p1 = a.a
@@ -110,18 +102,14 @@ define void @fold_strncmp_Ab_a(i32* %pcmp) {
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP:%.*]], align 4
 ; CHECK-NEXT:    [[PCMP1:%.*]] = getelementptr i32, i32* [[PCMP]], i64 1
 ; CHECK-NEXT:    store i32 0, i32* [[PCMP1]], align 4
-; CHECK-NEXT:    [[CMP2:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A:%.*]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 2)
 ; CHECK-NEXT:    [[PCMP2:%.*]] = getelementptr i32, i32* [[PCMP]], i64 2
-; CHECK-NEXT:    store i32 [[CMP2]], i32* [[PCMP2]], align 4
-; CHECK-NEXT:    [[CMP3:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 3)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP2]], align 4
 ; CHECK-NEXT:    [[PCMP3:%.*]] = getelementptr i32, i32* [[PCMP]], i64 3
-; CHECK-NEXT:    store i32 [[CMP3]], i32* [[PCMP3]], align 4
-; CHECK-NEXT:    [[CMP4:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 4)
+; CHECK-NEXT:    store i32 0, i32* [[PCMP3]], align 4
 ; CHECK-NEXT:    [[PCMP4:%.*]] = getelementptr i32, i32* [[PCMP]], i64 4
-; CHECK-NEXT:    store i32 [[CMP4]], i32* [[PCMP4]], align 4
-; CHECK-NEXT:    [[CMP5:%.*]] = call i32 @strncmp(i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 1, i64 3), i8* noundef nonnull dereferenceable(1) getelementptr inbounds ([[STRUCT_A]], %struct.A* @a, i64 0, i32 0, i64 0), i64 5)
+; CHECK-NEXT:    store i32 1, i32* [[PCMP4]], align 4
 ; CHECK-NEXT:    [[PCMP5:%.*]] = getelementptr i32, i32* [[PCMP]], i64 5
-; CHECK-NEXT:    store i32 [[CMP5]], i32* [[PCMP5]], align 4
+; CHECK-NEXT:    store i32 1, i32* [[PCMP5]], align 4
 ; CHECK-NEXT:    ret void
 ;
 ; p1 = &a.b[3]

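For strncmp the fold also depends on the bound: the result stays 0 while the bound
cuts the comparison off before the first difference (or a terminating nul), and
becomes nonzero once the differing position is included, which is why the stores
above switch from 0 to -1 (or 1) at a certain bound. A C sketch with made-up
strings, not the test's actual %struct.A contents:

  #include <string.h>

  /* Made-up data; the first differing byte is at index 3. */
  static const struct { char a[4]; char b[5]; } A = { "123", "1231" };

  void bound_dependent(int *out) {
    out[0] = strncmp(A.a, A.b, 3);   /* "123" vs "123": can fold to 0 */
    out[1] = strncmp(A.a, A.b, 4);   /* '\0' vs '1' at [3]: folds negative */
  }
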
diff  --git a/llvm/test/Transforms/InstCombine/wcslen-3.ll b/llvm/test/Transforms/InstCombine/wcslen-3.ll
index b1546f5f03c8f..64be307c87f22 100644
--- a/llvm/test/Transforms/InstCombine/wcslen-3.ll
+++ b/llvm/test/Transforms/InstCombine/wcslen-3.ll
@@ -68,8 +68,8 @@ define i1 @test_simplify5() {
 
 define i1 @test_simplify6(i16* %str_p) {
 ; CHECK-LABEL: @test_simplify6(
-; CHECK-NEXT:    [[STRLENFIRST:%.*]] = load i16, i16* [[STR_P:%.*]], align 2
-; CHECK-NEXT:    [[EQ_NULL:%.*]] = icmp eq i16 [[STRLENFIRST]], 0
+; CHECK-NEXT:    [[CHAR0:%.*]] = load i16, i16* [[STR_P:%.*]], align 2
+; CHECK-NEXT:    [[EQ_NULL:%.*]] = icmp eq i16 [[CHAR0]], 0
 ; CHECK-NEXT:    ret i1 [[EQ_NULL]]
 ;
   %str_l = call i64 @wcslen(i16* %str_p)
@@ -91,8 +91,8 @@ define i1 @test_simplify7() {
 
 define i1 @test_simplify8(i16* %str_p) {
 ; CHECK-LABEL: @test_simplify8(
-; CHECK-NEXT:    [[STRLENFIRST:%.*]] = load i16, i16* [[STR_P:%.*]], align 2
-; CHECK-NEXT:    [[NE_NULL:%.*]] = icmp ne i16 [[STRLENFIRST]], 0
+; CHECK-NEXT:    [[CHAR0:%.*]] = load i16, i16* [[STR_P:%.*]], align 2
+; CHECK-NEXT:    [[NE_NULL:%.*]] = icmp ne i16 [[CHAR0]], 0
 ; CHECK-NEXT:    ret i1 [[NE_NULL]]
 ;
   %str_l = call i64 @wcslen(i16* %str_p)
@@ -185,12 +185,11 @@ define i64 @test_no_simplify3(i16 %x) {
 
 @str32 = constant [1 x i32] [i32 0]
 
-; This could in principle be simplified, but the current implementation bails on
-; type mismatches.
+; This is safe to simplify despite the type mismatch.
+
 define i64 @test_no_simplify4() {
 ; CHECK-LABEL: @test_no_simplify4(
-; CHECK-NEXT:    [[L:%.*]] = call i64 @wcslen(i16* bitcast ([1 x i32]* @str32 to i16*))
-; CHECK-NEXT:    ret i64 [[L]]
+; CHECK-NEXT:    ret i64 0
 ;
   %l = call i64 @wcslen(i16* bitcast ([1 x i32]* @str32 to i16*))
   ret i64 %l

diff  --git a/llvm/test/Transforms/InstCombine/wcslen-6.ll b/llvm/test/Transforms/InstCombine/wcslen-6.ll
new file mode 100644
index 0000000000000..661309c42ae7c
--- /dev/null
+++ b/llvm/test/Transforms/InstCombine/wcslen-6.ll
@@ -0,0 +1,39 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+;
+; Verify that wcslen calls with a constant argument of a nonintegral type
+; of the same size as wchar_t or bigger don't cause trouble and are folded
+; as expected.
+;
+; RUN: opt < %s -passes=instcombine -S | FileCheck %s
+
+!llvm.module.flags = !{!0}
+!0 = !{i32 1, !"wchar_size", i32 4}
+
+declare i64 @wcslen(ptr)
+
+@af = constant [2 x float] [float 0x3FF3B2FEC0000000, float 0.0]
+
+; TODO: Fold wcslen(af) to a constant.
+
+define i64 @fold_af() {
+; CHECK-LABEL: @fold_af(
+; CHECK-NEXT:    [[N:%.*]] = call i64 @wcslen(ptr nonnull @af)
+; CHECK-NEXT:    ret i64 [[N]]
+;
+  %n = call i64 @wcslen(ptr @af)
+  ret i64 %n
+}
+
+; TODO: Likewise, fold wcslen(aS) to a constant.
+
+%struct.S = type { i32 }
+@aS = constant [3 x %struct.S] [%struct.S { i32 2 }, %struct.S { i32 1 }, %struct.S zeroinitializer]
+
+define i64 @fold_aS() {
+; CHECK-LABEL: @fold_aS(
+; CHECK-NEXT:    [[N:%.*]] = call i64 @wcslen(ptr nonnull @aS)
+; CHECK-NEXT:    ret i64 [[N]]
+;
+  %n = call i64 @wcslen(ptr @aS)
+  ret i64 %n
+}


        

