[llvm] 19e83a9 - [ValueTracking] Pointer is known nonnull after load/store

Nikita Popov via llvm-commits llvm-commits@lists.llvm.org
Wed Dec 11 11:41:25 PST 2019


Author: Danila Kutenin
Date: 2019-12-11T20:32:29+01:00
New Revision: 19e83a9b4cd4b0c2918d975f52bdfc6ba82d839f

URL: https://github.com/llvm/llvm-project/commit/19e83a9b4cd4b0c2918d975f52bdfc6ba82d839f
DIFF: https://github.com/llvm/llvm-project/commit/19e83a9b4cd4b0c2918d975f52bdfc6ba82d839f.diff

LOG: [ValueTracking] Pointer is known nonnull after load/store

If the pointer was loaded from or stored to before the null check,
the check is redundant and can be removed. Until now the optimizers
did not remove such nullptr checks; see https://gcc.godbolt.org/z/H2r5GG.
The patch allows more nonnull constraints to be used. It also
uncovered one more optimization in a PowerPC test. This is my first
LLVM review; I am open to any comments.

Differential Revision: https://reviews.llvm.org/D71177
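
For illustration, a minimal C++ sketch of the source pattern this
change lets LLVM fold (names are mine, not from the patch; it assumes
the default address space, where dereferencing null is undefined):

    int f(int *p) {
      int v = *p;        // p is dereferenced here, so in a program
                         // with defined behavior p cannot be null
      if (p == nullptr)  // dominated by the load: now known false,
        return -1;       // so the check and branch fold away
      return v;
    }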

Added: 
    

Modified: 
    llvm/lib/Analysis/ValueTracking.cpp
    llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
    llvm/test/CodeGen/PowerPC/pr39815.ll
    llvm/test/Transforms/Coroutines/coro-swifterror.ll
    llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll
    llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll
    llvm/test/Transforms/InstCombine/sink-alloca.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index e35b235b0bbc..0d15187f575c 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1943,6 +1943,15 @@ static bool isKnownNonNullFromDominatingCondition(const Value *V,
               Arg.hasNonNullAttr() && DT->dominates(CS.getInstruction(), CtxI))
             return true;
 
+    // If the value is used as a load/store, then the pointer must be non-null.
+    if (V == getLoadStorePointerOperand(U)) {
+      const Instruction *I = cast<Instruction>(U);
+      if (!NullPointerIsDefined(I->getFunction(),
+                                V->getType()->getPointerAddressSpace()) &&
+          DT->dominates(I, CtxI))
+        return true;
+    }
+
     // Consider only compare instructions uniquely controlling a branch
     CmpInst::Predicate Pred;
     if (!match(const_cast<User *>(U),

diff --git a/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll b/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
index 1414f230e2b0..9df8c2855132 100644
--- a/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
+++ b/llvm/test/Analysis/ValueTracking/known-nonnull-at.ll
@@ -125,14 +125,12 @@ define i1 @unknownReturnTest(i8* %x) {
   ret i1 %null_check
 }
 
-; TODO: Make sure that if load/store happened, the pointer is nonnull.
+; Make sure that if load/store happened, the pointer is nonnull.
 
 define i32 @test_null_after_store(i32* %0) {
 ; CHECK-LABEL: @test_null_after_store(
 ; CHECK-NEXT:    store i32 123, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32* [[TMP0]], null
-; CHECK-NEXT:    [[TMP3:%.*]] = select i1 [[TMP2]], i32 1, i32 2
-; CHECK-NEXT:    ret i32 [[TMP3]]
+; CHECK-NEXT:    ret i32 2
 ;
   store i32 123, i32* %0, align 4
   %2 = icmp eq i32* %0, null
@@ -142,10 +140,7 @@ define i32 @test_null_after_store(i32* %0) {
 
 define i32 @test_null_after_load(i32* %0) {
 ; CHECK-LABEL: @test_null_after_load(
-; CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP0:%.*]], align 4
-; CHECK-NEXT:    [[TMP3:%.*]] = icmp eq i32* [[TMP0]], null
-; CHECK-NEXT:    [[TMP4:%.*]] = select i1 [[TMP3]], i32 [[TMP2]], i32 1
-; CHECK-NEXT:    ret i32 [[TMP4]]
+; CHECK-NEXT:    ret i32 1
 ;
   %2 = load i32, i32* %0, align 4
   %3 = icmp eq i32* %0, null

diff --git a/llvm/test/CodeGen/PowerPC/pr39815.ll b/llvm/test/CodeGen/PowerPC/pr39815.ll
index badba3131954..a0cd0644f19b 100644
--- a/llvm/test/CodeGen/PowerPC/pr39815.ll
+++ b/llvm/test/CodeGen/PowerPC/pr39815.ll
@@ -20,11 +20,10 @@ entry:
 ; CHECK:      # %bb.0:
 ; CHECK-DAG:   addis [[REG1:[0-9]+]], [[REG2:[0-9]+]], [[VAR1:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-DAG:   ld [[REG3:[0-9]+]], [[VAR1]]@toc@l([[REG1]])
-; CHECK-DAG:   lwz [[REG4:[0-9]+]], 0([[REG3]])
-; CHECK-DAG:   addic [[REG5:[0-9]+]], [[REG3]], -1
-; CHECK-DAG:   addze [[REG7:[0-9]+]], [[REG4]]
-; CHECK-DAG:   addis [[REG8:[0-9]+]], [[REG2]], [[VAR2:[a-z0-9A-Z_.]+]]@toc@ha
+; CHECK-DAG:   lbz [[REG4:[0-9]+]], 0([[REG3]])
+; CHECK-DAG:   addi [[REG7:[0-9]+]], [[REG4]]
 ; CHECK-DAG:   andi. [[REG9:[0-9]+]], [[REG7]], 5
+; CHECK-DAG:   addis [[REG8:[0-9]+]], [[REG2]], [[VAR2:[a-z0-9A-Z_.]+]]@toc@ha
 ; CHECK-DAG:   stb [[REG9]], [[VAR2]]@toc@l([[REG8]])
 ; CHECK:       blr
 }

diff --git a/llvm/test/Transforms/Coroutines/coro-swifterror.ll b/llvm/test/Transforms/Coroutines/coro-swifterror.ll
index cf50bcd05472..932e448a5719 100644
--- a/llvm/test/Transforms/Coroutines/coro-swifterror.ll
+++ b/llvm/test/Transforms/Coroutines/coro-swifterror.ll
@@ -33,7 +33,7 @@ cleanup:
 ; CHECK-NEXT:    call void @print(i32 %n)
 ;   TODO: figure out a way to eliminate this
 ; CHECK-NEXT:    store i8* null, i8** %errorslot
-; CHECK-NEXT:    call void @maybeThrow(i8** swifterror %errorslot)
+; CHECK-NEXT:    call void @maybeThrow(i8** nonnull swifterror %errorslot)
 ; CHECK-NEXT:    [[T1:%.*]] = load i8*, i8** %errorslot
 ; CHECK-NEXT:    call void @logError(i8* [[T1]])
 ; CHECK-NEXT:    store i8* [[T1]], i8** %errorslot
@@ -51,7 +51,7 @@ cleanup:
 ; CHECK-NEXT:    store i32 %inc, i32* [[T0]], align 4
 ; CHECK-NEXT:    call void @print(i32 %inc)
 ; CHECK-NEXT:    store i8* [[ERROR]], i8** %2
-; CHECK-NEXT:    call void @maybeThrow(i8** swifterror %2)
+; CHECK-NEXT:    call void @maybeThrow(i8** nonnull swifterror %2)
 ; CHECK-NEXT:    [[T2:%.*]] = load i8*, i8** %2
 ; CHECK-NEXT:    call void @logError(i8* [[T2]])
 ; CHECK-NEXT:    store i8* [[T2]], i8** %2

diff --git a/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll b/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll
index 6bc62c94e326..a49b4c7af7e3 100644
--- a/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll
+++ b/llvm/test/Transforms/InstCombine/element-atomic-memintrins.ll
@@ -15,10 +15,10 @@ define void @test_memset_zero_length(i8* %dest) {
 define void @test_memset_to_store(i8* %dest) {
 ; CHECK-LABEL: @test_memset_to_store(
 ; CHECK-NEXT:    store atomic i8 1, i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 2, i32 1)
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 4, i32 1)
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 8, i32 1)
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 [[DEST]], i8 1, i32 16, i32 1)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 2, i32 1)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 4, i32 1)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 8, i32 1)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 1 [[DEST]], i8 1, i32 16, i32 1)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 1 %dest, i8 1, i32 1, i32 1)
@@ -34,9 +34,9 @@ define void @test_memset_to_store_2(i8* %dest) {
 ; CHECK-NEXT:    store atomic i8 1, i8* [[DEST:%.*]] unordered, align 2
 ; CHECK-NEXT:    [[TMP1:%.*]] = bitcast i8* [[DEST]] to i16*
 ; CHECK-NEXT:    store atomic i16 257, i16* [[TMP1]] unordered, align 2
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 4, i32 2)
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 8, i32 2)
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 [[DEST]], i8 1, i32 16, i32 2)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 4, i32 2)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 8, i32 2)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 2 [[DEST]], i8 1, i32 16, i32 2)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 2 %dest, i8 1, i32 1, i32 1)
@@ -54,8 +54,8 @@ define void @test_memset_to_store_4(i8* %dest) {
 ; CHECK-NEXT:    store atomic i16 257, i16* [[TMP1]] unordered, align 4
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast i8* [[DEST]] to i32*
 ; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 4
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 8, i32 4)
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 [[DEST]], i8 1, i32 16, i32 4)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 8, i32 4)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 4 [[DEST]], i8 1, i32 16, i32 4)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 4 %dest, i8 1, i32 1, i32 1)
@@ -75,7 +75,7 @@ define void @test_memset_to_store_8(i8* %dest) {
 ; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 8
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 8
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 [[DEST]], i8 1, i32 16, i32 8)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 8 [[DEST]], i8 1, i32 16, i32 8)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 8 %dest, i8 1, i32 1, i32 1)
@@ -95,7 +95,7 @@ define void @test_memset_to_store_16(i8* %dest) {
 ; CHECK-NEXT:    store atomic i32 16843009, i32* [[TMP2]] unordered, align 16
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    store atomic i64 72340172838076673, i64* [[TMP3]] unordered, align 16
-; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 [[DEST]], i8 1, i32 16, i32 16)
+; CHECK-NEXT:    call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* nonnull align 16 [[DEST]], i8 1, i32 16, i32 16)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* align 16 %dest, i8 1, i32 1, i32 1)
@@ -154,10 +154,10 @@ define void @test_memmove_loadstore(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memmove_loadstore(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
 ; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -176,9 +176,9 @@ define void @test_memmove_loadstore_2(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
 ; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
 ; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -201,8 +201,8 @@ define void @test_memmove_loadstore_4(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
 ; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
 ; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -229,7 +229,7 @@ define void @test_memmove_loadstore_8(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
 ; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -256,7 +256,7 @@ define void @test_memmove_loadstore_16(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
 ; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
-; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+; CHECK-NEXT:    call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)
@@ -302,10 +302,10 @@ define void @test_memcpy_loadstore(i8* %dest, i8* %src) {
 ; CHECK-LABEL: @test_memcpy_loadstore(
 ; CHECK-NEXT:    [[TMP1:%.*]] = load atomic i8, i8* [[SRC:%.*]] unordered, align 1
 ; CHECK-NEXT:    store atomic i8 [[TMP1]], i8* [[DEST:%.*]] unordered, align 1
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 2, i32 1)
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 4, i32 1)
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 8, i32 1)
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 [[DEST]], i8* align 1 [[SRC]], i32 16, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 2, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 4, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 8, i32 1)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 1 [[DEST]], i8* nonnull align 1 [[SRC]], i32 16, i32 1)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 1 %dest, i8* align 1 %src, i32 1, i32 1)
@@ -324,9 +324,9 @@ define void @test_memcpy_loadstore_2(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP3:%.*]] = bitcast i8* [[DEST]] to i16*
 ; CHECK-NEXT:    [[TMP4:%.*]] = load atomic i16, i16* [[TMP2]] unordered, align 2
 ; CHECK-NEXT:    store atomic i16 [[TMP4]], i16* [[TMP3]] unordered, align 2
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 4, i32 2)
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 8, i32 2)
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 [[DEST]], i8* align 2 [[SRC]], i32 16, i32 2)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 4, i32 2)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 8, i32 2)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 2 [[DEST]], i8* nonnull align 2 [[SRC]], i32 16, i32 2)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 2 %dest, i8* align 2 %src, i32 1, i32 1)
@@ -349,8 +349,8 @@ define void @test_memcpy_loadstore_4(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP6:%.*]] = bitcast i8* [[DEST]] to i32*
 ; CHECK-NEXT:    [[TMP7:%.*]] = load atomic i32, i32* [[TMP5]] unordered, align 4
 ; CHECK-NEXT:    store atomic i32 [[TMP7]], i32* [[TMP6]] unordered, align 4
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 8, i32 4)
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 [[DEST]], i8* align 4 [[SRC]], i32 16, i32 4)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 8, i32 4)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 4 [[DEST]], i8* nonnull align 4 [[SRC]], i32 16, i32 4)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dest, i8* align 4 %src, i32 1, i32 1)
@@ -377,7 +377,7 @@ define void @test_memcpy_loadstore_8(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 8
 ; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 8
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 [[DEST]], i8* align 8 [[SRC]], i32 16, i32 8)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 8 [[DEST]], i8* nonnull align 8 [[SRC]], i32 16, i32 8)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 8 %dest, i8* align 8 %src, i32 1, i32 1)
@@ -404,7 +404,7 @@ define void @test_memcpy_loadstore_16(i8* %dest, i8* %src) {
 ; CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[DEST]] to i64*
 ; CHECK-NEXT:    [[TMP10:%.*]] = load atomic i64, i64* [[TMP8]] unordered, align 16
 ; CHECK-NEXT:    store atomic i64 [[TMP10]], i64* [[TMP9]] unordered, align 16
-; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 [[DEST:%.*]], i8* align 16 [[SRC:%.*]], i32 16, i32 16)
+; CHECK-NEXT:    call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* nonnull align 16 [[DEST]], i8* nonnull align 16 [[SRC]], i32 16, i32 16)
 ; CHECK-NEXT:    ret void
 ;
   call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 16 %dest, i8* align 16 %src, i32 1, i32 1)

diff --git a/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll b/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll
index 882df7b37229..4fe575eaaa9b 100644
--- a/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll
+++ b/llvm/test/Transforms/InstCombine/phi-equal-incoming-pointers.ll
@@ -474,13 +474,13 @@ define i32 @test_extra_uses_non_inbounds(i1 %cond, i1 %cond2) {
 ; ALL-NEXT:    [[PTR1:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
 ; ALL-NEXT:    [[PTR1_TYPED:%.*]] = bitcast i8* [[PTR1]] to i32*
 ; ALL-NEXT:    [[RES1:%.*]] = load i32, i32* [[PTR1_TYPED]], align 4
-; ALL-NEXT:    call void @foo.i32(i32* [[PTR1_TYPED]])
+; ALL-NEXT:    call void @foo.i32(i32* nonnull [[PTR1_TYPED]])
 ; ALL-NEXT:    br label [[EXIT:%.*]]
 ; ALL:       bb2:
 ; ALL-NEXT:    [[PTR2:%.*]] = getelementptr i8, i8* [[OBJ]], i64 16
 ; ALL-NEXT:    [[PTR2_TYPED:%.*]] = bitcast i8* [[PTR2]] to i32*
 ; ALL-NEXT:    [[RES2:%.*]] = load i32, i32* [[PTR2_TYPED]], align 4
-; ALL-NEXT:    call void @foo.i32(i32* [[PTR2_TYPED]])
+; ALL-NEXT:    call void @foo.i32(i32* nonnull [[PTR2_TYPED]])
 ; ALL-NEXT:    br label [[EXIT]]
 ; ALL:       exit:
 ; ALL-NEXT:    [[PTR_TYPED:%.*]] = phi i32* [ [[PTR1_TYPED]], [[BB1]] ], [ [[PTR2_TYPED]], [[BB2]] ]

diff --git a/llvm/test/Transforms/InstCombine/sink-alloca.ll b/llvm/test/Transforms/InstCombine/sink-alloca.ll
index f2de74ff533b..dc5012680d0a 100644
--- a/llvm/test/Transforms/InstCombine/sink-alloca.ll
+++ b/llvm/test/Transforms/InstCombine/sink-alloca.ll
@@ -47,6 +47,6 @@ ret:                                              ; preds = %sinktarget, %nonent
 ; CHECK:   %p = call i32* @use_and_return(i32* nonnull %argmem)
 ; CHECK:   store i32 13, i32* %p
 ; CHECK:   call void @llvm.stackrestore(i8* %sp)
-; CHECK:   %0 = call i32* @use_and_return(i32* %p)
+; CHECK:   %0 = call i32* @use_and_return(i32* nonnull %p)
 
 attributes #0 = { nounwind }
