[llvm] 7093b92 - [AssumeBundles] Preserve Information from Load/Store

Author: Tyker
Date: 2020-03-31T17:47:04+02:00
New Revision: 7093b92a136b0696cf9cfc3f9822973ed886be5b

URL: https://github.com/llvm/llvm-project/commit/7093b92a136b0696cf9cfc3f9822973ed886be5b
DIFF: https://github.com/llvm/llvm-project/commit/7093b92a136b0696cf9cfc3f9822973ed886be5b.diff

LOG: [AssumeBundles] Preserve Information from Load/Store

Summary: This patch preserves dereferenceable, nonnull, and alignment information from loads and stores.
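
To illustrate (a minimal sketch distilled from the updated
assume-builder.ll test below; %p and %v are placeholder names), a load
such as

    %v = load i32, i32* %p, align 4

now causes the assume builder to emit, immediately before it:

    ; i32 has a 4-byte store size, hence "dereferenceable"(..., i64 4); the
    ; alignment is > 1, hence "align"(..., i64 4); "nonnull" is added only
    ; because null is not a defined pointer in this address space.
    call void @llvm.assume(i1 true) [ "align"(i32* %p, i64 4), "dereferenceable"(i32* %p, i64 4), "nonnull"(i32* %p) ]

With "null-pointer-is-valid"="true" on the enclosing function (see the
new test3 below), the "nonnull" entry is dropped.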

Reviewers: jdoerfert

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D76759

Added: 
    

Modified: 
    llvm/lib/IR/KnowledgeRetention.cpp
    llvm/test/IR/assume-builder.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/IR/KnowledgeRetention.cpp b/llvm/lib/IR/KnowledgeRetention.cpp
index 04a27ed0f82a..b0a884a2ee2a 100644
--- a/llvm/lib/IR/KnowledgeRetention.cpp
+++ b/llvm/lib/IR/KnowledgeRetention.cpp
@@ -8,6 +8,7 @@
 
 #include "llvm/IR/KnowledgeRetention.h"
 #include "llvm/ADT/DenseSet.h"
+#include "llvm/IR/Function.h"
 #include "llvm/IR/InstIterator.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Module.h"
@@ -173,9 +174,49 @@ struct AssumeBuilderState {
         FnAssume, ArrayRef<Value *>({ConstantInt::getTrue(C)}), OpBundle));
   }
 
-  void addInstruction(const Instruction *I) {
+  void addAttr(Attribute::AttrKind Kind, Value *Ptr, unsigned Argument = 0) {
+    AssumedKnowledge AK;
+    AK.Name = Attribute::getNameFromAttrKind(Kind).data();
+    AK.WasOn.setPointer(Ptr);
+    if (Attribute::doesAttrKindHaveArgument(Kind)) {
+      AK.Argument =
+          ConstantInt::get(Type::getInt64Ty(M->getContext()), Argument);
+    } else {
+      AK.Argument = nullptr;
+      assert(Argument == 0 && "there should be no argument");
+    }
+    AssumedKnowledgeSet.insert(AK);
+  };
+
+  void addLoadOrStore(Instruction *I) {
+    auto Impl = [&](auto *MemInst, Type *T) {
+      uint64_t DerefSize =
+          I->getModule()->getDataLayout().getTypeStoreSize(T).getKnownMinSize();
+      if (DerefSize != 0) {
+        addAttr(Attribute::Dereferenceable, MemInst->getPointerOperand(),
+                DerefSize);
+        if (!NullPointerIsDefined(MemInst->getFunction(),
+                                  MemInst->getPointerOperand()
+                                      ->getType()
+                                      ->getPointerAddressSpace()))
+          addAttr(Attribute::NonNull, MemInst->getPointerOperand());
+      }
+      MaybeAlign MA = MemInst->getAlign();
+      if (MA.valueOrOne() > 1)
+        addAttr(Attribute::Alignment, MemInst->getPointerOperand(),
+                MA.valueOrOne().value());
+    };
+    if (auto *Load = dyn_cast<LoadInst>(I))
+      Impl(Load, Load->getType());
+    if (auto *Store = dyn_cast<StoreInst>(I))
+      Impl(Store, Store->getValueOperand()->getType());
+  }
+
+  void addInstruction(Instruction *I) {
     if (auto *Call = dyn_cast<CallBase>(I))
-      addCall(Call);
+      return addCall(Call);
+    if (isa<LoadInst>(I) || isa<StoreInst>(I))
+      return addLoadOrStore(I);
     // TODO: Add support for the other Instructions.
     // TODO: Maybe we should look around and merge with other llvm.assume.
   }

diff --git a/llvm/test/IR/assume-builder.ll b/llvm/test/IR/assume-builder.ll
index 955be3a8225c..b39f5de6bdf3 100644
--- a/llvm/test/IR/assume-builder.ll
+++ b/llvm/test/IR/assume-builder.ll
@@ -2,6 +2,8 @@
 ; RUN: opt -passes='assume-builder,verify' --enable-knowledge-retention -S %s | FileCheck %s --check-prefixes=BASIC
 ; RUN: opt -passes='assume-builder,verify' --enable-knowledge-retention --assume-preserve-all -S %s | FileCheck %s --check-prefixes=ALL
 
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+
 declare void @func(i32*, i32*)
 declare void @func_cold(i32*) cold
 declare void @func_strbool(i32*) "no-jump-tables"
@@ -64,3 +66,259 @@ define void @test(i32* %P, i32* %P1, i32* %P2, i32* %P3) {
   call void @func(i32* nonnull %P1, i32* nonnull %P)
   ret void
 }
+
+%struct.S = type { i32, i8, i32* }
+
+define i32 @test2(%struct.S* %0, i32* %1, i8* %2) {
+; BASIC-LABEL: @test2(
+; BASIC-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; BASIC-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; BASIC-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; BASIC-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; BASIC-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; BASIC-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; BASIC-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]) ]
+; BASIC-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; BASIC-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; BASIC-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
+; BASIC-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; BASIC-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
+; BASIC-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; BASIC-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]) ]
+; BASIC-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]) ]
+; BASIC-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; BASIC-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; BASIC-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; BASIC-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]) ]
+; BASIC-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]) ]
+; BASIC-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; BASIC-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; BASIC-NEXT:    ret i32 [[TMP28]]
+;
+; ALL-LABEL: @test2(
+; ALL-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; ALL-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; ALL-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; ALL-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; ALL-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; ALL-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8), "nonnull"(i32** [[TMP5]]) ]
+; ALL-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4), "nonnull"(i32* [[TMP8]]) ]
+; ALL-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; ALL-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8), "nonnull"(i8** [[TMP6]]) ]
+; ALL-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1), "nonnull"(i8* [[TMP11]]) ]
+; ALL-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; ALL-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]]
+; ALL-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; ALL-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4), "nonnull"(i32* [[TMP17]]) ]
+; ALL-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1), "nonnull"(i8* [[TMP20]]) ]
+; ALL-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; ALL-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; ALL-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8), "nonnull"(%struct.S** [[TMP4]]) ]
+; ALL-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP25]], i64 8), "dereferenceable"(i32** [[TMP25]], i64 8), "nonnull"(i32** [[TMP25]]) ]
+; ALL-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4), "nonnull"(i32* [[TMP26]]) ]
+; ALL-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; ALL-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; ALL-NEXT:    ret i32 [[TMP28]]
+;
+  %4 = alloca %struct.S*, align 8
+  %5 = alloca i32*, align 8
+  %6 = alloca i8*, align 8
+  %7 = alloca %struct.S, align 8
+  store %struct.S* %0, %struct.S** %4, align 8
+  store i32* %1, i32** %5, align 8
+  store i8* %2, i8** %6
+  %8 = load i32*, i32** %5, align 8
+  %9 = load i32, i32* %8, align 4
+  %10 = trunc i32 %9 to i8
+  %11 = load i8*, i8** %6, align 8
+  store i8 %10, i8* %11, align 1
+  %12 = bitcast %struct.S* %7 to i8*
+  %13 = load %struct.S*, %struct.S** %4
+  %14 = bitcast %struct.S* %13 to i8*
+  %15 = bitcast %struct.S* %7 to i8*
+  %16 = load %struct.S*, %struct.S** %4, align 8
+  %17 = getelementptr inbounds %struct.S, %struct.S* %16, i32 0, i32 0
+  %18 = load i32, i32* %17, align 8
+  %19 = load %struct.S*, %struct.S** %4, align 8
+  %20 = getelementptr inbounds %struct.S, %struct.S* %19, i32 0, i32 1
+  %21 = load i8, i8* %20, align 4
+  %22 = sext i8 %21 to i32
+  %23 = add nsw i32 %18, %22
+  %24 = load %struct.S*, %struct.S** %4, align 8
+  %25 = getelementptr inbounds %struct.S, %struct.S* %24, i32 0, i32 2
+  %26 = load i32*, i32** %25, align 8
+  %27 = load i32, i32* %26, align 4
+  %28 = add nsw i32 %23, %27
+  ret i32 %28
+}
+
+define i32 @test3(%struct.S* %0, i32* %1, i8* %2) "null-pointer-is-valid"="true" {
+; BASIC-LABEL: @test3(
+; BASIC-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; BASIC-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; BASIC-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; BASIC-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; BASIC-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; BASIC-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; BASIC-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4) ]
+; BASIC-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; BASIC-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; BASIC-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
+; BASIC-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; BASIC-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; BASIC-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4) ]
+; BASIC-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1) ]
+; BASIC-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; BASIC-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; BASIC-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; BASIC-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; BASIC-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
+; BASIC-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
+; BASIC-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4) ]
+; BASIC-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; BASIC-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; BASIC-NEXT:    ret i32 [[TMP28]]
+;
+; ALL-LABEL: @test3(
+; ALL-NEXT:    [[TMP4:%.*]] = alloca %struct.S*, align 8
+; ALL-NEXT:    [[TMP5:%.*]] = alloca i32*, align 8
+; ALL-NEXT:    [[TMP6:%.*]] = alloca i8*, align 8
+; ALL-NEXT:    [[TMP7:%.*]] = alloca [[STRUCT_S:%.*]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    store %struct.S* [[TMP0:%.*]], %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; ALL-NEXT:    store i32* [[TMP1:%.*]], i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8** [[TMP6]], i64 8), "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; ALL-NEXT:    store i8* [[TMP2:%.*]], i8** [[TMP6]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32** [[TMP5]], i64 8), "dereferenceable"(i32** [[TMP5]], i64 8) ]
+; ALL-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[TMP5]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP8]], i64 4), "dereferenceable"(i32* [[TMP8]], i64 4) ]
+; ALL-NEXT:    [[TMP9:%.*]] = load i32, i32* [[TMP8]], align 4
+; ALL-NEXT:    [[TMP10:%.*]] = trunc i32 [[TMP9]] to i8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8** [[TMP6]], i64 8) ]
+; ALL-NEXT:    [[TMP11:%.*]] = load i8*, i8** [[TMP6]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i8* [[TMP11]], i64 1) ]
+; ALL-NEXT:    store i8 [[TMP10]], i8* [[TMP11]], align 1
+; ALL-NEXT:    [[TMP12:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP13:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP14:%.*]] = bitcast %struct.S* [[TMP13]] to i8*
+; ALL-NEXT:    [[TMP15:%.*]] = bitcast %struct.S* [[TMP7]] to i8*
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP16:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP16]], i32 0, i32 0
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP17]], i64 8), "dereferenceable"(i32* [[TMP17]], i64 4) ]
+; ALL-NEXT:    [[TMP18:%.*]] = load i32, i32* [[TMP17]], align 8
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP19:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP19]], i32 0, i32 1
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i8* [[TMP20]], i64 4), "dereferenceable"(i8* [[TMP20]], i64 1) ]
+; ALL-NEXT:    [[TMP21:%.*]] = load i8, i8* [[TMP20]], align 4
+; ALL-NEXT:    [[TMP22:%.*]] = sext i8 [[TMP21]] to i32
+; ALL-NEXT:    [[TMP23:%.*]] = add nsw i32 [[TMP18]], [[TMP22]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(%struct.S** [[TMP4]], i64 8), "dereferenceable"(%struct.S** [[TMP4]], i64 8) ]
+; ALL-NEXT:    [[TMP24:%.*]] = load %struct.S*, %struct.S** [[TMP4]], align 8
+; ALL-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP24]], i32 0, i32 2
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32** [[TMP25]], i64 8) ]
+; ALL-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[TMP25]]
+; ALL-NEXT:    call void @llvm.assume(i1 true) [ "align"(i32* [[TMP26]], i64 4), "dereferenceable"(i32* [[TMP26]], i64 4) ]
+; ALL-NEXT:    [[TMP27:%.*]] = load i32, i32* [[TMP26]], align 4
+; ALL-NEXT:    [[TMP28:%.*]] = add nsw i32 [[TMP23]], [[TMP27]]
+; ALL-NEXT:    ret i32 [[TMP28]]
+;
+  %4 = alloca %struct.S*, align 8
+  %5 = alloca i32*, align 8
+  %6 = alloca i8*, align 8
+  %7 = alloca %struct.S, align 8
+  store %struct.S* %0, %struct.S** %4, align 8
+  store i32* %1, i32** %5, align 8
+  store i8* %2, i8** %6, align 8
+  %8 = load i32*, i32** %5, align 8
+  %9 = load i32, i32* %8, align 4
+  %10 = trunc i32 %9 to i8
+  %11 = load i8*, i8** %6
+  store i8 %10, i8* %11, align 1
+  %12 = bitcast %struct.S* %7 to i8*
+  %13 = load %struct.S*, %struct.S** %4, align 8
+  %14 = bitcast %struct.S* %13 to i8*
+  %15 = bitcast %struct.S* %7 to i8*
+  %16 = load %struct.S*, %struct.S** %4, align 8
+  %17 = getelementptr inbounds %struct.S, %struct.S* %16, i32 0, i32 0
+  %18 = load i32, i32* %17, align 8
+  %19 = load %struct.S*, %struct.S** %4, align 8
+  %20 = getelementptr inbounds %struct.S, %struct.S* %19, i32 0, i32 1
+  %21 = load i8, i8* %20, align 4
+  %22 = sext i8 %21 to i32
+  %23 = add nsw i32 %18, %22
+  %24 = load %struct.S*, %struct.S** %4, align 8
+  %25 = getelementptr inbounds %struct.S, %struct.S* %24, i32 0, i32 2
+  %26 = load i32*, i32** %25
+  %27 = load i32, i32* %26, align 4
+  %28 = add nsw i32 %23, %27
+  ret i32 %28
+}


        

