[llvm] r336073 - Implement strip.invariant.group

Piotr Padlewski via llvm-commits llvm-commits at lists.llvm.org
Sun Jul 1 21:49:30 PDT 2018


Author: prazek
Date: Sun Jul  1 21:49:30 2018
New Revision: 336073

URL: http://llvm.org/viewvc/llvm-project?rev=336073&view=rev
Log:
Implement strip.invariant.group

Summary:
This patch introduces a new intrinsic,
strip.invariant.group, which was described in the
RFC: Devirtualization v2.

Reviewers: rsmith, hfinkel, nlopes, sanjoy, amharc, kuhar

Subscribers: arsenm, nhaehnle, JDevlieghere, hiraditya, xbolva00, llvm-commits

Differential Revision: https://reviews.llvm.org/D47103

Co-authored-by: Krzysztof Pszeniczny <krzysztof.pszeniczny at gmail.com>

Added:
    llvm/trunk/test/Other/invariant.group.ll
    llvm/trunk/test/Transforms/GlobalOpt/invariant.group.ll
Removed:
    llvm/trunk/test/Other/launder.invariant.group.ll
    llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll
Modified:
    llvm/trunk/docs/LangRef.rst
    llvm/trunk/include/llvm/IR/IRBuilder.h
    llvm/trunk/include/llvm/IR/Intrinsics.td
    llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
    llvm/trunk/lib/Analysis/ConstantFolding.cpp
    llvm/trunk/lib/Analysis/ValueTracking.cpp
    llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
    llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
    llvm/trunk/lib/IR/Value.cpp
    llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
    llvm/trunk/test/Analysis/ValueTracking/invariant.group.ll
    llvm/trunk/test/CodeGen/Generic/intrinsics.ll
    llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll
    llvm/trunk/test/Transforms/DeadStoreElimination/launder.invariant.group.ll
    llvm/trunk/test/Transforms/FunctionAttrs/nocapture.ll
    llvm/trunk/test/Transforms/GVN/invariant.group.ll
    llvm/trunk/test/Transforms/InstCombine/invariant.group.ll
    llvm/trunk/test/Transforms/NewGVN/invariant.group.ll

Modified: llvm/trunk/docs/LangRef.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/LangRef.rst?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/docs/LangRef.rst (original)
+++ llvm/trunk/docs/LangRef.rst Sun Jul  1 21:49:30 2018
@@ -13350,16 +13350,17 @@ Overview:
 """""""""
 
 The '``llvm.launder.invariant.group``' intrinsic can be used when an invariant
-established by invariant.group metadata no longer holds, to obtain a new pointer
-value that does not carry the invariant information. It is an experimental
-intrinsic, which means that its semantics might change in the future.
+established by ``invariant.group`` metadata no longer holds, to obtain a new
+pointer value that carries fresh invariant group information. It is an
+experimental intrinsic, which means that its semantics might change in the
+future.
 
 
 Arguments:
 """"""""""
 
-The ``llvm.launder.invariant.group`` takes only one argument, which is
-the pointer to the memory for which the ``invariant.group`` no longer holds.
+The ``llvm.launder.invariant.group`` takes only one argument, which is a pointer
+to the memory.
 
 Semantics:
 """"""""""
@@ -13368,6 +13369,43 @@ Returns another pointer that aliases its
 for the purposes of ``load``/``store`` ``invariant.group`` metadata.
 It does not read any accessible memory and the execution can be speculated.
 
+'``llvm.strip.invariant.group``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic. The memory object can belong to any address
+space. The returned pointer must belong to the same address space as the
+argument.
+
+::
+
+      declare i8* @llvm.strip.invariant.group.p0i8(i8* <ptr>)
+
+Overview:
+"""""""""
+
+The '``llvm.strip.invariant.group``' intrinsic can be used when an invariant
+established by ``invariant.group`` metadata no longer holds, to obtain a new pointer
+value that does not carry the invariant information. It is an experimental
+intrinsic, which means that its semantics might change in the future.
+
+
+Arguments:
+""""""""""
+
+The ``llvm.strip.invariant.group`` takes only one argument, which is a pointer
+to the memory.
+
+Semantics:
+""""""""""
+
+Returns another pointer that aliases its argument but which has no associated
+``invariant.group`` metadata.
+It does not read any memory and can be speculated.
+
+
+
 .. _constrainedfp:
 
 Constrained Floating-Point Intrinsics

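For context, a minimal IR sketch of how the new intrinsic is meant to be used
(not part of the patch; the function name and constants are illustrative):

    declare i8* @llvm.strip.invariant.group.p0i8(i8*)

    define i8 @strip_example(i8* %p) {
      ; Accesses through %p participate in the invariant group.
      store i8 42, i8* %p, !invariant.group !0
      ; %q aliases %p but carries no invariant.group association, so
      ; later accesses through %q are treated as ordinary loads/stores.
      %q = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
      %v = load i8, i8* %q
      ret i8 %v
    }

    !0 = !{}

As the dontProveEquality test below checks, the optimizer should also not
prove %q == %p just because the two pointers must alias.
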
Modified: llvm/trunk/include/llvm/IR/IRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IRBuilder.h?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IRBuilder.h (original)
+++ llvm/trunk/include/llvm/IR/IRBuilder.h Sun Jul  1 21:49:30 2018
@@ -2022,6 +2022,7 @@ public:
   Value *CreateLaunderInvariantGroup(Value *Ptr) {
     assert(isa<PointerType>(Ptr->getType()) &&
            "launder.invariant.group only applies to pointers.");
+    // FIXME: we could potentially avoid casts to/from i8*.
     auto *PtrType = Ptr->getType();
     auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
     if (PtrType != Int8PtrTy)
@@ -2039,6 +2040,34 @@ public:
 
     if (PtrType != Int8PtrTy)
       return CreateBitCast(Fn, PtrType);
+    return Fn;
+  }
+
+  /// \brief Create a strip.invariant.group intrinsic call. If Ptr's type is
+  /// not i8*, it is first cast to i8* in the same address space, and the
+  /// result of the call is cast back to Ptr's type.
+  Value *CreateStripInvariantGroup(Value *Ptr) {
+    assert(isa<PointerType>(Ptr->getType()) &&
+           "strip.invariant.group only applies to pointers.");
+
+    // FIXME: we could potentially avoid casts to/from i8*.
+    auto *PtrType = Ptr->getType();
+    auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
+    if (PtrType != Int8PtrTy)
+      Ptr = CreateBitCast(Ptr, Int8PtrTy);
+    Module *M = BB->getParent()->getParent();
+    Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
+        M, Intrinsic::strip_invariant_group, {Int8PtrTy});
+
+    assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
+           FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
+               Int8PtrTy &&
+           "StripInvariantGroup should take and return the same type");
+
+    CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
+
+    if (PtrType != Int8PtrTy)
+      return CreateBitCast(Fn, PtrType);
     return Fn;
   }
 

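For a pointer whose type is not i8*, the new CreateStripInvariantGroup helper
above emits roughly the following IR (a sketch; %struct.A and the function
name are illustrative, not taken from the patch):

    %struct.A = type { i32 }

    declare i8* @llvm.strip.invariant.group.p0i8(i8*)

    define %struct.A* @emit_strip(%struct.A* %ptr) {
      ; Cast to i8* in the same address space, call the intrinsic, and
      ; cast the result back to the original pointer type.
      %0 = bitcast %struct.A* %ptr to i8*
      %1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %0)
      %2 = bitcast i8* %1 to %struct.A*
      ret %struct.A* %2
    }

As the FIXME notes, these casts could potentially be avoided, since the
intrinsic is overloaded on the pointer type.
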
Modified: llvm/trunk/include/llvm/IR/Intrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/Intrinsics.td?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/Intrinsics.td (original)
+++ llvm/trunk/include/llvm/IR/Intrinsics.td Sun Jul  1 21:49:30 2018
@@ -728,6 +728,11 @@ def int_launder_invariant_group : Intrin
                                             [LLVMMatchType<0>],
                                             [IntrInaccessibleMemOnly, IntrSpeculatable]>;
 
+
+def int_strip_invariant_group : Intrinsic<[llvm_anyptr_ty],
+                                          [LLVMMatchType<0>],
+                                          [IntrSpeculatable, IntrNoMem]>;
+
 //===------------------------ Stackmap Intrinsics -------------------------===//
 //
 def int_experimental_stackmap : Intrinsic<[],

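Unlike launder.invariant.group, which is IntrInaccessibleMemOnly, the new
intrinsic is IntrNoMem. On the generated declarations this shows up roughly as
follows (these are the attribute sets checked in test/Other/invariant.group.ll
below):

    ; Function Attrs: inaccessiblememonly nounwind speculatable
    declare i8* @llvm.launder.invariant.group.p0i8(i8*)

    ; Function Attrs: nounwind readnone speculatable
    declare i8* @llvm.strip.invariant.group.p0i8(i8*)
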
Modified: llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp Sun Jul  1 21:49:30 2018
@@ -431,13 +431,15 @@ bool BasicAAResult::DecomposeGEPExpressi
     const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
     if (!GEPOp) {
       if (auto CS = ImmutableCallSite(V)) {
-        // Note: getArgumentAliasingToReturnedPointer keeps it in sync with
-        // CaptureTracking, which is needed for correctness.  This is because
-        // some intrinsics like launder.invariant.group returns pointers that
-        // are aliasing it's argument, which is known to CaptureTracking.
-        // If AliasAnalysis does not use the same information, it could assume
-        // that pointer returned from launder does not alias it's argument
-        // because launder could not return it if the pointer was not captured.
+        // CaptureTracking knows about special capturing properties of some
+        // intrinsics like launder.invariant.group that cannot be expressed
+        // with attributes: they return a pointer that aliases their argument.
+        // Because an analysis may assume that a nocapture pointer cannot be
+        // returned from such an intrinsic (since the argument would have to
+        // be marked with the 'returned' attribute), it is crucial to use this
+        // function, which is kept in sync with CaptureTracking. Not using it
+        // may cause subtle miscompiles where two aliasing pointers are
+        // assumed to be noalias.
         if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
           V = RP;
           continue;

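A small sketch of the kind of miscompile the comment is warning about if alias
analysis did not stay in sync with CaptureTracking (function name and values
are illustrative):

    declare i8* @llvm.strip.invariant.group.p0i8(i8*)

    define i8 @alias_example(i8* %p) {
      %q = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
      store i8 1, i8* %p
      store i8 2, i8* %q
      ; %p and %q alias, so this load must see 2. If AA wrongly assumed
      ; the two pointers were noalias, the load could be forwarded from
      ; the first store and return 1.
      %v = load i8, i8* %p
      ret i8 %v
    }
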
Modified: llvm/trunk/lib/Analysis/ConstantFolding.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ConstantFolding.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ConstantFolding.cpp (original)
+++ llvm/trunk/lib/Analysis/ConstantFolding.cpp Sun Jul  1 21:49:30 2018
@@ -1393,6 +1393,7 @@ bool llvm::canConstantFoldCallTo(Immutab
   case Intrinsic::fmuladd:
   case Intrinsic::copysign:
   case Intrinsic::launder_invariant_group:
+  case Intrinsic::strip_invariant_group:
   case Intrinsic::round:
   case Intrinsic::masked_load:
   case Intrinsic::sadd_with_overflow:
@@ -1596,14 +1597,16 @@ Constant *ConstantFoldScalarCall(StringR
         return Constant::getNullValue(Ty);
       if (IntrinsicID == Intrinsic::bswap ||
           IntrinsicID == Intrinsic::bitreverse ||
-          IntrinsicID == Intrinsic::launder_invariant_group)
+          IntrinsicID == Intrinsic::launder_invariant_group ||
+          IntrinsicID == Intrinsic::strip_invariant_group)
         return Operands[0];
     }
 
     if (isa<ConstantPointerNull>(Operands[0]) &&
         Operands[0]->getType()->getPointerAddressSpace() == 0) {
-      // launder(null) == null iff in addrspace 0
-      if (IntrinsicID == Intrinsic::launder_invariant_group)
+      // launder(null) == null == strip(null) iff in addrspace 0
+      if (IntrinsicID == Intrinsic::launder_invariant_group ||
+          IntrinsicID == Intrinsic::strip_invariant_group)
         return Operands[0];
       return nullptr;
     }

Modified: llvm/trunk/lib/Analysis/ValueTracking.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/ValueTracking.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/ValueTracking.cpp (original)
+++ llvm/trunk/lib/Analysis/ValueTracking.cpp Sun Jul  1 21:49:30 2018
@@ -3404,8 +3404,9 @@ const Value *llvm::getArgumentAliasingTo
 }
 
 bool llvm::isIntrinsicReturningPointerAliasingArgumentWithoutCapturing(
-      ImmutableCallSite CS) {
-  return CS.getIntrinsicID() == Intrinsic::launder_invariant_group;
+    ImmutableCallSite CS) {
+  return CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
+         CS.getIntrinsicID() == Intrinsic::strip_invariant_group;
 }
 
 /// \p PN defines a loop-variant pointer to an object.  Check if the
@@ -3454,13 +3455,15 @@ Value *llvm::GetUnderlyingObject(Value *
       return V;
     } else {
       if (auto CS = CallSite(V)) {
-        // Note: getArgumentAliasingToReturnedPointer keeps it in sync with
-        // CaptureTracking, which is needed for correctness.  This is because
-        // some intrinsics like launder.invariant.group returns pointers that
-        // are aliasing it's argument, which is known to CaptureTracking.
-        // If AliasAnalysis does not use the same information, it could assume
-        // that pointer returned from launder does not alias it's argument
-        // because launder could not return it if the pointer was not captured.
+        // CaptureTracking knows about special capturing properties of some
+        // intrinsics like launder.invariant.group that cannot be expressed
+        // with attributes: they return a pointer that aliases their argument.
+        // Because an analysis may assume that a nocapture pointer cannot be
+        // returned from such an intrinsic (since the argument would have to
+        // be marked with the 'returned' attribute), it is crucial to use this
+        // function, which is kept in sync with CaptureTracking. Not using it
+        // may cause subtle miscompiles where two aliasing pointers are
+        // assumed to be noalias.
         if (auto *RP = getArgumentAliasingToReturnedPointer(CS)) {
           V = RP;
           continue;

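With this change GetUnderlyingObject also walks through the new intrinsic, so,
for example (sketch, names illustrative), it reports the alloca as the
underlying object of the stripped pointer:

    declare i8* @llvm.strip.invariant.group.p0i8(i8*)

    define void @underlying_object() {
      %a = alloca i8
      ; GetUnderlyingObject(%p) now returns %a, matching what
      ; CaptureTracking already assumes about the intrinsic.
      %p = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
      store i8 7, i8* %p
      ret void
    }
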
Modified: llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp (original)
+++ llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp Sun Jul  1 21:49:30 2018
@@ -1702,6 +1702,7 @@ bool CodeGenPrepare::optimizeCallInst(Ca
       return true;
     }
     case Intrinsic::launder_invariant_group:
+    case Intrinsic::strip_invariant_group:
       II->replaceAllUsesWith(II->getArgOperand(0));
       II->eraseFromParent();
       return true;

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp Sun Jul  1 21:49:30 2018
@@ -1437,6 +1437,7 @@ bool FastISel::selectIntrinsicCall(const
     return true;
   }
   case Intrinsic::launder_invariant_group:
+  case Intrinsic::strip_invariant_group:
   case Intrinsic::expect: {
     unsigned ResultReg = getRegForValue(II->getArgOperand(0));
     if (!ResultReg)

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Sun Jul  1 21:49:30 2018
@@ -5768,6 +5768,7 @@ SelectionDAGBuilder::visitIntrinsicCall(
   case Intrinsic::annotation:
   case Intrinsic::ptr_annotation:
   case Intrinsic::launder_invariant_group:
+  case Intrinsic::strip_invariant_group:
     // Drop the intrinsic, but forward the value
     setValue(&I, getValue(I.getOperand(0)));
     return nullptr;

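At codegen time the intrinsic carries no useful information, so
CodeGenPrepare, FastISel and SelectionDAGBuilder all simply forward the
operand. A function like the following (illustrative) is therefore lowered as
if it returned %p directly:

    declare i8* @llvm.strip.invariant.group.p0i8(i8*)

    define i8* @forwarded(i8* %p) {
      ; The call is dropped before/at instruction selection and all uses
      ; of %q are replaced with %p.
      %q = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
      ret i8* %q
    }
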
Modified: llvm/trunk/lib/IR/Value.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/Value.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/IR/Value.cpp (original)
+++ llvm/trunk/lib/IR/Value.cpp Sun Jul  1 21:49:30 2018
@@ -521,7 +521,8 @@ static const Value *stripPointerCastsAnd
         // but it can't be marked with returned attribute, that's why it needs
         // special case.
         if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
-            CS.getIntrinsicID() == Intrinsic::launder_invariant_group) {
+            (CS.getIntrinsicID() == Intrinsic::launder_invariant_group ||
+             CS.getIntrinsicID() == Intrinsic::strip_invariant_group)) {
           V = CS.getArgOperand(0);
           continue;
         }

Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp Sun Jul  1 21:49:30 2018
@@ -457,6 +457,7 @@ static bool isCallPromotable(CallInst *C
   case Intrinsic::invariant_start:
   case Intrinsic::invariant_end:
   case Intrinsic::launder_invariant_group:
+  case Intrinsic::strip_invariant_group:
   case Intrinsic::objectsize:
     return true;
   default:
@@ -882,6 +883,7 @@ bool AMDGPUPromoteAlloca::handleAlloca(A
     case Intrinsic::invariant_start:
     case Intrinsic::invariant_end:
     case Intrinsic::launder_invariant_group:
+    case Intrinsic::strip_invariant_group:
       Intr->eraseFromParent();
       // FIXME: I think the invariant marker should still theoretically apply,
       // but the intrinsics need to be changed to accept pointers with any

Modified: llvm/trunk/test/Analysis/ValueTracking/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/ValueTracking/invariant.group.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/ValueTracking/invariant.group.ll (original)
+++ llvm/trunk/test/Analysis/ValueTracking/invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -1,7 +1,7 @@
 ; RUN: opt -S -instsimplify -instcombine < %s | FileCheck %s
 
-; CHECK-LABEL: define void @checkNonnull()
-define void @checkNonnull() {
+; CHECK-LABEL: define void @checkNonnullLaunder()
+define void @checkNonnullLaunder() {
 ; CHECK:   %p = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %0)
 ; CHECK:   %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* nonnull %p)
 ; CHECK:   call void @use(i8* nonnull %p2)
@@ -15,5 +15,22 @@ entry:
   ret void
 }
 
+; CHECK-LABEL: define void @checkNonnullStrip()
+define void @checkNonnullStrip() {
+; CHECK:   %p = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %0)
+; CHECK:   %p2 = call i8* @llvm.strip.invariant.group.p0i8(i8* nonnull %p)
+; CHECK:   call void @use(i8* nonnull %p2)
+entry:
+  %0 = alloca i8, align 8
+
+  %p = call i8* @llvm.strip.invariant.group.p0i8(i8* %0)
+  %p2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
+  call void @use(i8* %p2)
+
+  ret void
+}
+
 declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+
 declare void @use(i8*)

Modified: llvm/trunk/test/CodeGen/Generic/intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/intrinsics.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/intrinsics.ll Sun Jul  1 21:49:30 2018
@@ -41,11 +41,19 @@ define double @test_cos(float %F) {
 
 declare i8* @llvm.launder.invariant.group(i8*)
 
-define i8* @barrier(i8* %p) {
+define i8* @launder(i8* %p) {
         %q = call i8* @llvm.launder.invariant.group(i8* %p)
         ret i8* %q
 }
 
+declare i8* @llvm.strip.invariant.group(i8*)
+
+define i8* @strip(i8* %p) {
+        %q = call i8* @llvm.strip.invariant.group(i8* %p)
+        ret i8* %q
+}
+
+
 ; sideeffect
 
 declare void @llvm.sideeffect()

Added: llvm/trunk/test/Other/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/invariant.group.ll?rev=336073&view=auto
==============================================================================
--- llvm/trunk/test/Other/invariant.group.ll (added)
+++ llvm/trunk/test/Other/invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -0,0 +1,104 @@
+; RUN: opt -S -early-cse < %s | FileCheck %s
+; RUN: opt -S -gvn < %s | FileCheck %s
+; RUN: opt -S -newgvn < %s | FileCheck %s
+; RUN: opt -S -O3 < %s | FileCheck %s
+
+; These tests check whether passes with CSE functionality can CSE calls to
+; launder.invariant.group, which is prohibited if there is a memory clobber
+; between the barrier calls.
+
+; CHECK-LABEL: define i8 @optimizable()
+define i8 @optimizable() {
+entry:
+    %ptr = alloca i8
+    store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; FIXME: This one could be CSE
+; CHECK: call i8* @llvm.launder.invariant.group
+    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+    call void @clobber(i8* %ptr)
+
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+    call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+    call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+    %v = load i8, i8* %ptr3, !invariant.group !0
+
+    ret i8 %v
+}
+
+; CHECK-LABEL: define i8 @unoptimizable()
+define i8 @unoptimizable() {
+entry:
+    %ptr = alloca i8
+    store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+    call void @clobber(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+    call void @clobber(i8* %ptr)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+    call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+    call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+    %v = load i8, i8* %ptr3, !invariant.group !0
+
+    ret i8 %v
+}
+
+; CHECK-LABEL: define i8 @unoptimizable2()
+define i8 @unoptimizable2() {
+    %ptr = alloca i8
+    store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+    store i8 43, i8* %ptr
+; CHECK: call i8* @llvm.launder.invariant.group
+    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+    call void @clobber(i8* %ptr)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+    call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+    call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+    %v = load i8, i8* %ptr3, !invariant.group !0
+    ret i8 %v
+}
+
+; This test checks that the optimizer does not prove pointer equality based on mustalias
+; CHECK-LABEL: define void @dontProveEquality(i8* %a)
+define void @dontProveEquality(i8* %a) {
+  %b = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+  %r = icmp eq i8* %b, %a
+; CHECK: call void @useBool(i1 %r)
+  call void @useBool(i1 %r)
+
+  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %r2 = icmp eq i8* %b2, %a
+; CHECK: call void @useBool(i1 %r2)
+  call void @useBool(i1 %r2)
+
+  ret void
+}
+
+declare void @use(i8* readonly)
+declare void @useBool(i1)
+
+declare void @clobber(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable{{$}}
+; CHECK-NEXT: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+
+; CHECK: Function Attrs: nounwind readnone speculatable{{$}}
+; CHECK-NEXT: declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+
+
+!0 = !{}
\ No newline at end of file

Removed: llvm/trunk/test/Other/launder.invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/launder.invariant.group.ll?rev=336072&view=auto
==============================================================================
--- llvm/trunk/test/Other/launder.invariant.group.ll (original)
+++ llvm/trunk/test/Other/launder.invariant.group.ll (removed)
@@ -1,94 +0,0 @@
-; RUN: opt -S -early-cse < %s | FileCheck %s
-; RUN: opt -S -gvn < %s | FileCheck %s
-; RUN: opt -S -newgvn < %s | FileCheck %s
-; RUN: opt -S -O3 < %s | FileCheck %s
-
-; These tests checks if passes with CSE functionality can do CSE on
-; launder.invariant.group, that is prohibited if there is a memory clobber
-; between barriers call.
-
-; CHECK-LABEL: define i8 @optimizable()
-define i8 @optimizable() {
-entry:
-    %ptr = alloca i8
-    store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.launder.invariant.group.p0i8
-    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-; FIXME: This one could be CSE
-; CHECK: call i8* @llvm.launder.invariant.group
-    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
-    call void @clobber(i8* %ptr)
-
-; CHECK: call void @use(i8* {{.*}}%ptr2)
-    call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
-    call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
-    %v = load i8, i8* %ptr3, !invariant.group !0
-
-    ret i8 %v
-}
-
-; CHECK-LABEL: define i8 @unoptimizable()
-define i8 @unoptimizable() {
-entry:
-    %ptr = alloca i8
-    store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.launder.invariant.group.p0i8
-    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-    call void @clobber(i8* %ptr)
-; CHECK: call i8* @llvm.launder.invariant.group.p0i8
-    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
-    call void @clobber(i8* %ptr)
-; CHECK: call void @use(i8* {{.*}}%ptr2)
-    call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
-    call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
-    %v = load i8, i8* %ptr3, !invariant.group !0
-
-    ret i8 %v
-}
-
-; CHECK-LABEL: define i8 @unoptimizable2()
-define i8 @unoptimizable2() {
-    %ptr = alloca i8
-    store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.launder.invariant.group
-    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-    store i8 43, i8* %ptr
-; CHECK: call i8* @llvm.launder.invariant.group
-    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
-    call void @clobber(i8* %ptr)
-; CHECK: call void @use(i8* {{.*}}%ptr2)
-    call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
-    call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
-    %v = load i8, i8* %ptr3, !invariant.group !0
-    ret i8 %v
-}
-
-; This test check if optimizer is not proving equality based on mustalias
-; CHECK-LABEL: define void @dontProveEquality(i8* %a)
-define void @dontProveEquality(i8* %a) {
-  %b = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
-  %r = icmp eq i8* %b, %a
-;CHECK: call void @useBool(i1 %r)
-  call void @useBool(i1 %r)
-  ret void
-}
-
-declare void @use(i8* readonly)
-declare void @useBool(i1)
-
-declare void @clobber(i8*)
-; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable{{$}}
-; CHECK-NEXT: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
-
-!0 = !{}
-

Modified: llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -7,8 +7,8 @@ define void @foo() {
 enter:
   ; CHECK-NOT: !invariant.group
   ; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
-  ; CHECK: %val = load i8, i8* @tmp, !tbaa
-  %val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0}
+  ; CHECK: %val = load i8, i8* @tmp{{$}}
+  %val = load i8, i8* @tmp, !invariant.group !0
   %ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
   
   ; CHECK: store i8 42, i8* @tmp{{$}}
@@ -18,7 +18,23 @@ enter:
 }
 ; CHECK-LABEL: }
 
-declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+; CHECK-LABEL: define void @foo2() {
+define void @foo2() {
+enter:
+  ; CHECK-NOT: !invariant.group
+  ; CHECK-NOT: @llvm.strip.invariant.group.p0i8(
+  ; CHECK: %val = load i8, i8* @tmp{{$}}
+  %val = load i8, i8* @tmp, !invariant.group !0
+  %ptr = call i8* @llvm.strip.invariant.group.p0i8(i8* @tmp)
+
+  ; CHECK: store i8 42, i8* @tmp{{$}}
+  store i8 42, i8* %ptr, !invariant.group !0
 
+  ret void
+}
+; CHECK-LABEL: }
+
+
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.strip.invariant.group.p0i8(i8*)
 !0 = !{}
-!1 = !{!"x", !0}

Modified: llvm/trunk/test/Transforms/DeadStoreElimination/launder.invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/DeadStoreElimination/launder.invariant.group.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/DeadStoreElimination/launder.invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/DeadStoreElimination/launder.invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -27,4 +27,39 @@ define void @skip2Barriers(i8* %ptr) {
   ret void
 }
 
+; CHECK-LABEL: void @skip3Barriers(i8* %ptr)
+define void @skip3Barriers(i8* %ptr) {
+; CHECK-NOT: store i8 42
+  store i8 42, i8* %ptr
+; CHECK: %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
+  %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
+; CHECK-NOT: store i8 43
+  store i8 43, i8* %ptr2
+  %ptr3 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr2)
+  %ptr4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr3)
+
+; CHECK: store i8 44
+  store i8 44, i8* %ptr4
+  ret void
+}
+
+; CHECK-LABEL: void @skip4Barriers(i8* %ptr)
+define void @skip4Barriers(i8* %ptr) {
+; CHECK-NOT: store i8 42
+  store i8 42, i8* %ptr
+; CHECK: %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
+  %ptr2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr)
+; CHECK-NOT: store i8 43
+  store i8 43, i8* %ptr2
+  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr2)
+  %ptr4 = call i8* @llvm.strip.invariant.group.p0i8(i8* %ptr3)
+  %ptr5 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr3)
+
+; CHECK: store i8 44
+  store i8 44, i8* %ptr5
+  ret void
+}
+
+
 declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.strip.invariant.group.p0i8(i8*)
\ No newline at end of file

Modified: llvm/trunk/test/Transforms/FunctionAttrs/nocapture.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/FunctionAttrs/nocapture.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/FunctionAttrs/nocapture.ll (original)
+++ llvm/trunk/test/Transforms/FunctionAttrs/nocapture.ll Sun Jul  1 21:49:30 2018
@@ -237,4 +237,21 @@ define void @captureLaunder(i8* %p) {
   ret void
 }
 
+; CHECK: @nocaptureStrip(i8* nocapture %p)
+define void @nocaptureStrip(i8* %p) {
+entry:
+  %b = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
+  store i8 42, i8* %b
+  ret void
+}
+
+@g3 = global i8* null
+; CHECK: define void @captureStrip(i8* %p)
+define void @captureStrip(i8* %p) {
+  %b = call i8* @llvm.strip.invariant.group.p0i8(i8* %p)
+  store i8* %b, i8** @g3
+  ret void
+}
+
 declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.strip.invariant.group.p0i8(i8*)

Modified: llvm/trunk/test/Transforms/GVN/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/invariant.group.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/GVN/invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/GVN/invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -51,6 +51,18 @@ entry:
     ret i8 %b
 }
 
+; CHECK-LABEL: define i1 @proveEqualityForStrip(
+define i1 @proveEqualityForStrip(i8* %a) {
+; FIXME: The first call could be also removed by GVN. Right now
+; DCE removes it. The second call is CSE'd with the first one.
+; CHECK: %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+; CHECK-NOT: llvm.strip.invariant.group
+  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %r = icmp eq i8* %b1, %b2
+; CHECK: ret i1 true
+  ret i1 %r
+}
 ; CHECK-LABEL: define i8 @unoptimizable1() {
 define i8 @unoptimizable1() {
 entry:
@@ -437,10 +449,10 @@ declare void @_ZN1AC1Ev(%struct.A*)
 declare void @fooBit(i1*, i1)
 
 declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+
 
-; Function Attrs: nounwind
-declare void @llvm.assume(i1 %cmp.vtables) #0
+declare void @llvm.assume(i1 %cmp.vtables)
 
 
-attributes #0 = { nounwind }
 !0 = !{}
\ No newline at end of file

Removed: llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll?rev=336072&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll (original)
+++ llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll (removed)
@@ -1,79 +0,0 @@
-; RUN: opt -S -globalopt < %s | FileCheck %s
-
-; This test is hint, what could globalOpt optimize and what it can't
-; FIXME: @tmp and @tmp2 can be safely set to 42
-; CHECK: @tmp = local_unnamed_addr global i32 0
-; CHECK: @tmp2 = local_unnamed_addr global i32 0
-; CHECK: @tmp3 = global i32 0
-
-@tmp = global i32 0
-@tmp2 = global i32 0
-@tmp3 = global i32 0
-@ptrToTmp3 = global i32* null
-
-@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
-
-define i32 @TheAnswerToLifeTheUniverseAndEverything() {
-  ret i32 42
-}
-
-define void @_GLOBAL__I_a() {
-enter:
-  call void @_optimizable()
-  call void @_not_optimizable()
-  ret void
-}
-
-define void @_optimizable() {
-enter:
-  %valptr = alloca i32
-  
-  %val = call i32 @TheAnswerToLifeTheUniverseAndEverything()
-  store i32 %val, i32* @tmp
-  store i32 %val, i32* %valptr
-  
-  %0 = bitcast i32* %valptr to i8*
-  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
-  %1 = bitcast i8* %barr to i32*
-  
-  %val2 = load i32, i32* %1
-  store i32 %val2, i32* @tmp2
-  ret void
-}
-
-; We can't step through launder.invariant.group here, because that would change
-; this load in @usage_of_globals()
-; val = load i32, i32* %ptrVal, !invariant.group !0 
-; into 
-; %val = load i32, i32* @tmp3, !invariant.group !0
-; and then we could assume that %val and %val2 to be the same, which coud be 
-; false, because @changeTmp3ValAndCallBarrierInside() may change the value
-; of @tmp3.
-define void @_not_optimizable() {
-enter:
-  store i32 13, i32* @tmp3, !invariant.group !0
-  
-  %0 = bitcast i32* @tmp3 to i8*
-  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
-  %1 = bitcast i8* %barr to i32*
-  
-  store i32* %1, i32** @ptrToTmp3
-  store i32 42, i32* %1, !invariant.group !0
-  
-  ret void
-}
-define void @usage_of_globals() {
-entry:
-  %ptrVal = load i32*, i32** @ptrToTmp3
-  %val = load i32, i32* %ptrVal, !invariant.group !0
-  
-  call void @changeTmp3ValAndCallBarrierInside()
-  %val2 = load i32, i32* @tmp3, !invariant.group !0
-  ret void;
-}
-
-declare void @changeTmp3ValAndCallBarrierInside()
-
-declare i8* @llvm.launder.invariant.group(i8*)
-
-!0 = !{}

Added: llvm/trunk/test/Transforms/GlobalOpt/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GlobalOpt/invariant.group.ll?rev=336073&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/GlobalOpt/invariant.group.ll (added)
+++ llvm/trunk/test/Transforms/GlobalOpt/invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -0,0 +1,79 @@
+; RUN: opt -S -globalopt < %s | FileCheck %s
+
+; This test is a hint at what GlobalOpt could optimize and what it can't
+; FIXME: @tmp and @tmp2 can be safely set to 42
+; CHECK: @tmp = local_unnamed_addr global i32 0
+; CHECK: @tmp2 = local_unnamed_addr global i32 0
+; CHECK: @tmp3 = global i32 0
+
+@tmp = global i32 0
+@tmp2 = global i32 0
+@tmp3 = global i32 0
+@ptrToTmp3 = global i32* null
+
+@llvm.global_ctors = appending global [1 x { i32, void ()* }] [{ i32, void ()* } { i32 65535, void ()* @_GLOBAL__I_a }]
+
+define i32 @TheAnswerToLifeTheUniverseAndEverything() {
+  ret i32 42
+}
+
+define void @_GLOBAL__I_a() {
+enter:
+  call void @_optimizable()
+  call void @_not_optimizable()
+  ret void
+}
+
+define void @_optimizable() {
+enter:
+  %valptr = alloca i32
+
+  %val = call i32 @TheAnswerToLifeTheUniverseAndEverything()
+  store i32 %val, i32* @tmp
+  store i32 %val, i32* %valptr
+
+  %0 = bitcast i32* %valptr to i8*
+  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
+  %1 = bitcast i8* %barr to i32*
+
+  %val2 = load i32, i32* %1
+  store i32 %val2, i32* @tmp2
+  ret void
+}
+
+; We can't step through launder.invariant.group here, because that would change
+; this load in @usage_of_globals()
+; %val = load i32, i32* %ptrVal, !invariant.group !0
+; into
+; %val = load i32, i32* @tmp3, !invariant.group !0
+; and then we could assume that %val and %val2 are the same, which could be
+; false, because @changeTmp3ValAndCallBarrierInside() may change the value
+; of @tmp3.
+define void @_not_optimizable() {
+enter:
+  store i32 13, i32* @tmp3, !invariant.group !0
+
+  %0 = bitcast i32* @tmp3 to i8*
+  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
+  %1 = bitcast i8* %barr to i32*
+
+  store i32* %1, i32** @ptrToTmp3
+  store i32 42, i32* %1, !invariant.group !0
+
+  ret void
+}
+define void @usage_of_globals() {
+entry:
+  %ptrVal = load i32*, i32** @ptrToTmp3
+  %val = load i32, i32* %ptrVal, !invariant.group !0
+
+  call void @changeTmp3ValAndCallBarrierInside()
+  %val2 = load i32, i32* @tmp3, !invariant.group !0
+  ret void;
+}
+
+declare void @changeTmp3ValAndCallBarrierInside()
+
+declare i8* @llvm.launder.invariant.group(i8*)
+
+!0 = !{}

Modified: llvm/trunk/test/Transforms/InstCombine/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/invariant.group.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -1,5 +1,6 @@
 ; RUN: opt -instcombine -S < %s | FileCheck %s
 
+
 ; CHECK-LABEL: define i8* @simplifyNullLaunder()
 define i8* @simplifyNullLaunder() {
 ; CHECK-NEXT: ret i8* null
@@ -29,6 +30,39 @@ define i8 addrspace(42)* @simplifyUndefL
   ret i8 addrspace(42)* %b2
 }
 
-
 declare i8* @llvm.launder.invariant.group.p0i8(i8*)
 declare i8 addrspace(42)* @llvm.launder.invariant.group.p42i8(i8 addrspace(42)*)
+
+
+; CHECK-LABEL: define i8* @simplifyNullStrip()
+define i8* @simplifyNullStrip() {
+; CHECK-NEXT: ret i8* null
+  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* null)
+  ret i8* %b2
+}
+
+; CHECK-LABEL: define i8 addrspace(42)* @dontsimplifyNullStripForDifferentAddrspace()
+define i8 addrspace(42)* @dontsimplifyNullStripForDifferentAddrspace() {
+; CHECK: %b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* null)
+; CHECK: ret i8 addrspace(42)* %b2
+  %b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* null)
+  ret i8 addrspace(42)* %b2
+}
+
+; CHECK-LABEL: define i8* @simplifyUndefStrip()
+define i8* @simplifyUndefStrip() {
+; CHECK-NEXT: ret i8* undef
+  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* undef)
+  ret i8* %b2
+}
+
+; CHECK-LABEL: define i8 addrspace(42)* @simplifyUndefStrip2()
+define i8 addrspace(42)* @simplifyUndefStrip2() {
+; CHECK-NEXT: ret i8 addrspace(42)* undef
+  %b2 = call i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)* undef)
+  ret i8 addrspace(42)* %b2
+}
+
+declare i8* @llvm.strip.invariant.group.p0i8(i8*)
+declare i8 addrspace(42)* @llvm.strip.invariant.group.p42i8(i8 addrspace(42)*)
+

Modified: llvm/trunk/test/Transforms/NewGVN/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/NewGVN/invariant.group.ll?rev=336073&r1=336072&r2=336073&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/NewGVN/invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/NewGVN/invariant.group.ll Sun Jul  1 21:49:30 2018
@@ -52,6 +52,19 @@ entry:
     ret i8 %b
 }
 
+; CHECK-LABEL: define i1 @proveEqualityForStrip(
+define i1 @proveEqualityForStrip(i8* %a) {
+; FIXME: The first call could be also removed by GVN. Right now
+; DCE removes it. The second call is CSE'd with the first one.
+; CHECK: %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+; CHECK-NOT: llvm.strip.invariant.group
+  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
+  %r = icmp eq i8* %b1, %b2
+; CHECK: ret i1 true
+  ret i1 %r
+}
+
 ; CHECK-LABEL: define i8 @unoptimizable1() {
 define i8 @unoptimizable1() {
 entry:



