[llvm] r331448 - Rename invariant.group.barrier to launder.invariant.group
Piotr Padlewski via llvm-commits
llvm-commits at lists.llvm.org
Thu May 3 04:03:02 PDT 2018
Author: prazek
Date: Thu May 3 04:03:01 2018
New Revision: 331448
URL: http://llvm.org/viewvc/llvm-project?rev=331448&view=rev
Log:
Rename invariant.group.barrier to launder.invariant.group
Summary:
This is one of the initial commit of "RFC: Devirtualization v2" proposal:
https://docs.google.com/document/d/16GVtCpzK8sIHNc2qZz6RN8amICNBtvjWUod2SujZVEo/edit?usp=sharing
Reviewers: rsmith, amharc, kuhar, sanjoy
Subscribers: arsenm, nhaehnle, javed.absar, hiraditya, llvm-commits
Differential Revision: https://reviews.llvm.org/D45111
Added:
llvm/trunk/test/Bitcode/upgrade-invariant-group-barrier.ll
llvm/trunk/test/Other/launder.invariant.group.ll
Removed:
llvm/trunk/test/Other/Inputs/invariant.group.barrier.ll
llvm/trunk/test/Other/invariant.group.barrier.ll
Modified:
llvm/trunk/docs/LangRef.rst
llvm/trunk/docs/ReleaseNotes.rst
llvm/trunk/include/llvm/IR/IRBuilder.h
llvm/trunk/include/llvm/IR/Intrinsics.td
llvm/trunk/include/llvm/IR/Value.h
llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
llvm/trunk/lib/Analysis/MemorySSA.cpp
llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
llvm/trunk/lib/IR/AutoUpgrade.cpp
llvm/trunk/lib/IR/Value.cpp
llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll
llvm/trunk/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
llvm/trunk/test/CodeGen/Generic/intrinsics.ll
llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll
llvm/trunk/test/Transforms/GVN/invariant.group.ll
llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll
llvm/trunk/test/Transforms/NewGVN/invariant.group.ll
Modified: llvm/trunk/docs/LangRef.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/LangRef.rst?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/docs/LangRef.rst (original)
+++ llvm/trunk/docs/LangRef.rst Thu May 3 04:03:01 2018
@@ -5318,7 +5318,7 @@ The experimental ``invariant.group`` met
The existence of the ``invariant.group`` metadata on the instruction tells
the optimizer that every ``load`` and ``store`` to the same pointer operand
within the same invariant group can be assumed to load or store the same
-value (but see the ``llvm.invariant.group.barrier`` intrinsic which affects
+value (but see the ``llvm.launder.invariant.group`` intrinsic which affects
when two pointers are considered the same). Pointers returned by bitcast or
getelementptr with only zero indices are considered the same.
@@ -5343,13 +5343,13 @@ Examples:
store i8 %unknownValue, i8* %ptr, !invariant.group !0 ; Can assume that %unknownValue == 42
call void @foo(i8* %ptr)
- %newPtr2 = call i8* @llvm.invariant.group.barrier(i8* %ptr)
- %d = load i8, i8* %newPtr2, !invariant.group !0 ; Can't step through invariant.group.barrier to get value of %ptr
+ %newPtr2 = call i8* @llvm.launder.invariant.group(i8* %ptr)
+ %d = load i8, i8* %newPtr2, !invariant.group !0 ; Can't step through launder.invariant.group to get value of %ptr
...
declare void @foo(i8*)
declare i8* @getPointer(i8*)
- declare i8* @llvm.invariant.group.barrier(i8*)
+ declare i8* @llvm.launder.invariant.group(i8*)
!0 = !{!"magic ptr"}
!1 = !{!"other ptr"}
@@ -12908,7 +12908,7 @@ Semantics:
This intrinsic indicates that the memory is mutable again.
-'``llvm.invariant.group.barrier``' Intrinsic
+'``llvm.launder.invariant.group``' Intrinsic
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
@@ -12919,12 +12919,12 @@ argument.
::
- declare i8* @llvm.invariant.group.barrier.p0i8(i8* <ptr>)
+ declare i8* @llvm.launder.invariant.group.p0i8(i8* <ptr>)
Overview:
"""""""""
-The '``llvm.invariant.group.barrier``' intrinsic can be used when an invariant
+The '``llvm.launder.invariant.group``' intrinsic can be used when an invariant
established by invariant.group metadata no longer holds, to obtain a new pointer
value that does not carry the invariant information. It is an experimental
intrinsic, which means that its semantics might change in the future.
@@ -12933,7 +12933,7 @@ intrinsic, which means that its semantic
Arguments:
""""""""""
-The ``llvm.invariant.group.barrier`` takes only one argument, which is
+The ``llvm.launder.invariant.group`` takes only one argument, which is
the pointer to the memory for which the ``invariant.group`` no longer holds.
Semantics:
@@ -12941,6 +12941,7 @@ Semantics:
Returns another pointer that aliases its argument but which is considered different
for the purposes of ``load``/``store`` ``invariant.group`` metadata.
+It does not read any accessible memory and the execution can be speculated.
.. _constrainedfp:
Modified: llvm/trunk/docs/ReleaseNotes.rst
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/docs/ReleaseNotes.rst?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/docs/ReleaseNotes.rst (original)
+++ llvm/trunk/docs/ReleaseNotes.rst Thu May 3 04:03:01 2018
@@ -107,6 +107,8 @@ Changes to the LLVM IR
have changed. Alignment is no longer an argument, and are instead conveyed as
parameter attributes.
+* invariant.group.barrier has been renamed to launder.invariant.group.
+
Changes to the ARM Backend
--------------------------
Modified: llvm/trunk/include/llvm/IR/IRBuilder.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/IRBuilder.h?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/IRBuilder.h (original)
+++ llvm/trunk/include/llvm/IR/IRBuilder.h Thu May 3 04:03:01 2018
@@ -1962,28 +1962,26 @@ public:
Name);
}
- /// Create an invariant.group.barrier intrinsic call, that stops
- /// optimizer to propagate equality using invariant.group metadata.
- /// If Ptr type is different from pointer to i8, it's casted to pointer to i8
- /// in the same address space before call and casted back to Ptr type after
- /// call.
- Value *CreateInvariantGroupBarrier(Value *Ptr) {
+ /// Create a launder.invariant.group intrinsic call. If Ptr type is
+ /// different from pointer to i8, it's casted to pointer to i8 in the same
+ /// address space before call and casted back to Ptr type after call.
+ Value *CreateLaunderInvariantGroup(Value *Ptr) {
assert(isa<PointerType>(Ptr->getType()) &&
- "invariant.group.barrier only applies to pointers.");
+ "launder.invariant.group only applies to pointers.");
auto *PtrType = Ptr->getType();
auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
if (PtrType != Int8PtrTy)
Ptr = CreateBitCast(Ptr, Int8PtrTy);
Module *M = BB->getParent()->getParent();
- Function *FnInvariantGroupBarrier = Intrinsic::getDeclaration(
- M, Intrinsic::invariant_group_barrier, {Int8PtrTy});
+ Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
+ M, Intrinsic::launder_invariant_group, {Int8PtrTy});
- assert(FnInvariantGroupBarrier->getReturnType() == Int8PtrTy &&
- FnInvariantGroupBarrier->getFunctionType()->getParamType(0) ==
+ assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
+ FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
Int8PtrTy &&
- "InvariantGroupBarrier should take and return the same type");
+ "LaunderInvariantGroup should take and return the same type");
- CallInst *Fn = CreateCall(FnInvariantGroupBarrier, {Ptr});
+ CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
if (PtrType != Int8PtrTy)
return CreateBitCast(Fn, PtrType);
Modified: llvm/trunk/include/llvm/IR/Intrinsics.td
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/Intrinsics.td?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/Intrinsics.td (original)
+++ llvm/trunk/include/llvm/IR/Intrinsics.td Thu May 3 04:03:01 2018
@@ -710,7 +710,7 @@ def int_invariant_end : Intrinsic<[],
llvm_anyptr_ty],
[IntrArgMemOnly, NoCapture<2>]>;
-// invariant.group.barrier can't be marked with 'readnone' (IntrNoMem),
+// launder.invariant.group can't be marked with 'readnone' (IntrNoMem),
// because it would cause CSE of two barriers with the same argument.
// Inaccessiblememonly says that the barrier doesn't read the argument,
// but it changes state not accessible to this module. This way
@@ -722,9 +722,9 @@ def int_invariant_end : Intrinsic<[],
// it would remove barrier.
// Note that it is still experimental, which means that its semantics
// might change in the future.
-def int_invariant_group_barrier : Intrinsic<[llvm_anyptr_ty],
+def int_launder_invariant_group : Intrinsic<[llvm_anyptr_ty],
[LLVMMatchType<0>],
- [IntrInaccessibleMemOnly]>;
+ [IntrInaccessibleMemOnly, IntrSpeculatable]>;
//===------------------------ Stackmap Intrinsics -------------------------===//
//
Modified: llvm/trunk/include/llvm/IR/Value.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/IR/Value.h?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/include/llvm/IR/Value.h (original)
+++ llvm/trunk/include/llvm/IR/Value.h Thu May 3 04:03:01 2018
@@ -509,15 +509,16 @@ public:
static_cast<const Value *>(this)->stripPointerCasts());
}
- /// Strip off pointer casts, all-zero GEPs, aliases and barriers.
+ /// Strip off pointer casts, all-zero GEPs, aliases and invariant group
+ /// info.
///
/// Returns the original uncasted value. If this is called on a non-pointer
/// value, it returns 'this'. This function should be used only in
/// Alias analysis.
- const Value *stripPointerCastsAndBarriers() const;
- Value *stripPointerCastsAndBarriers() {
+ const Value *stripPointerCastsAndInvariantGroups() const;
+ Value *stripPointerCastsAndInvariantGroups() {
return const_cast<Value *>(
- static_cast<const Value *>(this)->stripPointerCastsAndBarriers());
+ static_cast<const Value *>(this)->stripPointerCastsAndInvariantGroups());
}
/// Strip off pointer casts and all-zero GEPs.
Modified: llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/BasicAliasAnalysis.cpp Thu May 3 04:03:01 2018
@@ -985,8 +985,8 @@ static AliasResult aliasSameBasePointerG
const GEPOperator *GEP2,
uint64_t V2Size,
const DataLayout &DL) {
- assert(GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
"Expected GEPs with the same pointer operand");
@@ -1264,8 +1264,8 @@ AliasResult BasicAAResult::aliasGEP(cons
// If we know the two GEPs are based off of the exact same pointer (and not
// just the same underlying object), see if that tells us anything about
// the resulting pointers.
- if (GEP1->getPointerOperand()->stripPointerCastsAndBarriers() ==
- GEP2->getPointerOperand()->stripPointerCastsAndBarriers() &&
+ if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
+ GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
// If we couldn't find anything interesting, don't abandon just yet.
@@ -1578,8 +1578,8 @@ AliasResult BasicAAResult::aliasCheck(co
return NoAlias;
// Strip off any casts if they exist.
- V1 = V1->stripPointerCastsAndBarriers();
- V2 = V2->stripPointerCastsAndBarriers();
+ V1 = V1->stripPointerCastsAndInvariantGroups();
+ V2 = V2->stripPointerCastsAndInvariantGroups();
// If V1 or V2 is undef, the result is NoAlias because we can always pick a
// value for undef that aliases nothing in the program.
Modified: llvm/trunk/lib/Analysis/MemorySSA.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemorySSA.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/MemorySSA.cpp (original)
+++ llvm/trunk/lib/Analysis/MemorySSA.cpp Thu May 3 04:03:01 2018
@@ -352,9 +352,6 @@ static bool isUseTriviallyOptimizableToL
const Instruction *I) {
// If the memory can't be changed, then loads of the memory can't be
// clobbered.
- //
- // FIXME: We should handle invariant groups, as well. It's a bit harder,
- // because we need to pay close attention to invariant group barriers.
return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
AA.pointsToConstantMemory(cast<LoadInst>(I)->
getPointerOperand()));
Modified: llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp (original)
+++ llvm/trunk/lib/CodeGen/CodeGenPrepare.cpp Thu May 3 04:03:01 2018
@@ -1668,7 +1668,7 @@ bool CodeGenPrepare::optimizeCallInst(Ca
InsertedInsts.insert(ExtVal);
return true;
}
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
II->replaceAllUsesWith(II->getArgOperand(0));
II->eraseFromParent();
return true;
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/FastISel.cpp Thu May 3 04:03:01 2018
@@ -1444,7 +1444,7 @@ bool FastISel::selectIntrinsicCall(const
updateValueMap(II, ResultReg);
return true;
}
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
case Intrinsic::expect: {
unsigned ResultReg = getRegForValue(II->getArgOperand(0));
if (!ResultReg)
Modified: llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp Thu May 3 04:03:01 2018
@@ -5710,7 +5710,7 @@ SelectionDAGBuilder::visitIntrinsicCall(
}
case Intrinsic::annotation:
case Intrinsic::ptr_annotation:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
// Drop the intrinsic, but forward the value
setValue(&I, getValue(I.getOperand(0)));
return nullptr;
Modified: llvm/trunk/lib/IR/AutoUpgrade.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/AutoUpgrade.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/IR/AutoUpgrade.cpp (original)
+++ llvm/trunk/lib/IR/AutoUpgrade.cpp Thu May 3 04:03:01 2018
@@ -528,6 +528,17 @@ static bool UpgradeIntrinsicFunction1(Fu
return true;
}
}
+ if (Name.startswith("invariant.group.barrier")) {
+ // Rename invariant.group.barrier to launder.invariant.group
+ auto Args = F->getFunctionType()->params();
+ Type* ObjectPtr[1] = {Args[0]};
+ rename(F);
+ NewFn = Intrinsic::getDeclaration(F->getParent(),
+ Intrinsic::launder_invariant_group, ObjectPtr);
+ return true;
+
+ }
+
break;
}
case 'm': {
Modified: llvm/trunk/lib/IR/Value.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/IR/Value.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/IR/Value.cpp (original)
+++ llvm/trunk/lib/IR/Value.cpp Thu May 3 04:03:01 2018
@@ -499,7 +499,7 @@ namespace {
enum PointerStripKind {
PSK_ZeroIndices,
PSK_ZeroIndicesAndAliases,
- PSK_ZeroIndicesAndAliasesAndBarriers,
+ PSK_ZeroIndicesAndAliasesAndInvariantGroups,
PSK_InBoundsConstantIndices,
PSK_InBounds
};
@@ -518,7 +518,7 @@ static const Value *stripPointerCastsAnd
if (auto *GEP = dyn_cast<GEPOperator>(V)) {
switch (StripKind) {
case PSK_ZeroIndicesAndAliases:
- case PSK_ZeroIndicesAndAliasesAndBarriers:
+ case PSK_ZeroIndicesAndAliasesAndInvariantGroups:
case PSK_ZeroIndices:
if (!GEP->hasAllZeroIndices())
return V;
@@ -546,11 +546,11 @@ static const Value *stripPointerCastsAnd
V = RV;
continue;
}
- // The result of invariant.group.barrier must alias it's argument,
+  // The result of launder.invariant.group must alias its argument,
// but it can't be marked with returned attribute, that's why it needs
// special case.
- if (StripKind == PSK_ZeroIndicesAndAliasesAndBarriers &&
- CS.getIntrinsicID() == Intrinsic::invariant_group_barrier) {
+ if (StripKind == PSK_ZeroIndicesAndAliasesAndInvariantGroups &&
+ CS.getIntrinsicID() == Intrinsic::launder_invariant_group) {
V = CS.getArgOperand(0);
continue;
}
@@ -576,8 +576,8 @@ const Value *Value::stripInBoundsConstan
return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}
-const Value *Value::stripPointerCastsAndBarriers() const {
- return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndBarriers>(
+const Value *Value::stripPointerCastsAndInvariantGroups() const {
+ return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliasesAndInvariantGroups>(
this);
}
Modified: llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp (original)
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUPromoteAlloca.cpp Thu May 3 04:03:01 2018
@@ -454,7 +454,7 @@ static bool isCallPromotable(CallInst *C
case Intrinsic::lifetime_end:
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
case Intrinsic::objectsize:
return true;
default:
@@ -878,7 +878,7 @@ bool AMDGPUPromoteAlloca::handleAlloca(A
}
case Intrinsic::invariant_start:
case Intrinsic::invariant_end:
- case Intrinsic::invariant_group_barrier:
+ case Intrinsic::launder_invariant_group:
Intr->eraseFromParent();
// FIXME: I think the invariant marker should still theoretically apply,
// but the intrinsics need to be changed to accept pointers with any
Modified: llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll (original)
+++ llvm/trunk/test/Analysis/MemorySSA/invariant-groups.ll Thu May 3 04:03:01 2018
@@ -1,7 +1,7 @@
; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
;
; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
-; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
+; launder.invariant.group intrinsics entirely. We'll need to pay attention to
; them when/if we decide to support invariant groups.
@g = external global i32
@@ -17,8 +17,8 @@ define i32 @foo(i32* %a) {
%1 = bitcast i32* %a to i8*
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; This have to be MemoryUse(2), because we can't skip the barrier based on
@@ -36,8 +36,8 @@ define i32 @skipBarrier(i32* %a) {
%1 = bitcast i32* %a to i8*
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group.
@@ -55,8 +55,8 @@ define i32 @skipBarrier2(i32* %a) {
%1 = bitcast i32* %a to i8*
; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; We can skip the barrier only if the "skip" is not based on !invariant.group.
@@ -86,8 +86,8 @@ define i32 @handleInvariantGroups(i32* %
store i32 1, i32* @g, align 4
%1 = bitcast i32* %a to i8*
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
- %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+ %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
%a32 = bitcast i8* %a8 to i32*
; CHECK: MemoryUse(2)
@@ -145,8 +145,8 @@ entry:
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body:
@@ -192,8 +192,8 @@ entry:
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Body, label %Loop.End
Loop.Body:
@@ -253,8 +253,8 @@ entry:
; CHECK-NEXT: call void @clobber
call void @clobber8(i8* %p)
; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
- %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+ %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
br i1 undef, label %Loop.Pre, label %Loop.End
Loop.Pre:
@@ -302,12 +302,12 @@ entry:
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; FIXME: This one could be CSEd.
; CHECK: 3 = MemoryDef(2)
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 4 = MemoryDef(3)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
call void @clobber8(i8* %ptr)
@@ -331,13 +331,13 @@ define i8 @unoptimizable2() {
; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
store i8 42, i8* %ptr, !invariant.group !0
; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 3 = MemoryDef(2)
store i8 43, i8* %ptr
; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK: 5 = MemoryDef(4)
; CHECK-NEXT: call void @clobber8(i8* %ptr)
call void @clobber8(i8* %ptr)
@@ -354,7 +354,7 @@ define i8 @unoptimizable2() {
}
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare void @clobber(i32*)
declare void @clobber8(i8*)
declare void @use(i8* readonly)
Added: llvm/trunk/test/Bitcode/upgrade-invariant-group-barrier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Bitcode/upgrade-invariant-group-barrier.ll?rev=331448&view=auto
==============================================================================
--- llvm/trunk/test/Bitcode/upgrade-invariant-group-barrier.ll (added)
+++ llvm/trunk/test/Bitcode/upgrade-invariant-group-barrier.ll Thu May 3 04:03:01 2018
@@ -0,0 +1,22 @@
+; RUN: opt -S < %s | FileCheck %s
+
+; The intrinsic initially only took i8*, then it was made polymorphic, then
+; it was renamed to launder.invariant.group
+define void @test(i8* %p1, i16* %p16) {
+; CHECK-LABEL: @test
+; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p4 = call i16* @llvm.launder.invariant.group.p0i16(i16* %p16)
+ %p2 = call i8* @llvm.invariant.group.barrier(i8* %p1)
+ %p3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p1)
+ %p4 = call i16* @llvm.invariant.group.barrier.p0i16(i16* %p16)
+ ret void
+}
+
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i16* @llvm.launder.invariant.group.p0i16(i16*)
+declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i16* @llvm.invariant.group.barrier.p0i16(i16*)
Modified: llvm/trunk/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll Thu May 3 04:03:01 2018
@@ -1,10 +1,10 @@
; RUN: llc -O0 -mtriple=arm64 < %s
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
define i8* @barrier(i8* %p) {
-; CHECK: bl llvm.invariant.group.barrier
- %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+; CHECK: bl llvm.launder.invariant.group
+ %q = call i8* @llvm.launder.invariant.group(i8* %p)
ret i8* %q
}
Modified: llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll (original)
+++ llvm/trunk/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll Thu May 3 04:03:01 2018
@@ -3,7 +3,7 @@ target datalayout = "A5"
declare {}* @llvm.invariant.start.p5i8(i64, i8 addrspace(5)* nocapture) #0
declare void @llvm.invariant.end.p5i8({}*, i64, i8 addrspace(5)* nocapture) #0
-declare i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)*) #1
+declare i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)*) #1
; GCN-LABEL: {{^}}use_invariant_promotable_lds:
; GCN: buffer_load_dword
@@ -17,7 +17,7 @@ bb:
store i32 %tmp3, i32 addrspace(5)* %tmp
%tmp4 = call {}* @llvm.invariant.start.p5i8(i64 4, i8 addrspace(5)* %tmp1) #0
call void @llvm.invariant.end.p5i8({}* %tmp4, i64 4, i8 addrspace(5)* %tmp1) #0
- %tmp5 = call i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)* %tmp1) #1
+ %tmp5 = call i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)* %tmp1) #1
ret void
}
Modified: llvm/trunk/test/CodeGen/Generic/intrinsics.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/Generic/intrinsics.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/Generic/intrinsics.ll (original)
+++ llvm/trunk/test/CodeGen/Generic/intrinsics.ll Thu May 3 04:03:01 2018
@@ -39,10 +39,10 @@ define double @test_cos(float %F) {
ret double %I
}
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
define i8* @barrier(i8* %p) {
- %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+ %q = call i8* @llvm.launder.invariant.group(i8* %p)
ret i8* %q
}
Removed: llvm/trunk/test/Other/Inputs/invariant.group.barrier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/Inputs/invariant.group.barrier.ll?rev=331447&view=auto
==============================================================================
--- llvm/trunk/test/Other/Inputs/invariant.group.barrier.ll (original)
+++ llvm/trunk/test/Other/Inputs/invariant.group.barrier.ll (removed)
@@ -1,15 +0,0 @@
-; RUN: opt -S -gvn < %s | FileCheck %s
-; RUN: opt -S -newgvn < %s | FileCheck %s
-; RUN: opt -S -O3 < %s | FileCheck %s
-
-; This test check if optimizer is not proving equality based on mustalias
-; CHECK-LABEL: define void @dontProveEquality(i8* %a)
-define void @dontProveEquality(i8* %a) {
- %b = call i8* @llvm.invariant.group.barrier(i8* %a)
- %r = i1 icmp eq i8* %b, i8* %a
-;CHECK: call void @use(%r)
- call void @use(%r)
-}
-
-declare void @use(i1)
-declare i8* @llvm.invariant.group.barrier(i8 *)
Removed: llvm/trunk/test/Other/invariant.group.barrier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/invariant.group.barrier.ll?rev=331447&view=auto
==============================================================================
--- llvm/trunk/test/Other/invariant.group.barrier.ll (original)
+++ llvm/trunk/test/Other/invariant.group.barrier.ll (removed)
@@ -1,83 +0,0 @@
-; RUN: opt -S -early-cse < %s | FileCheck %s
-; RUN: opt -S -gvn < %s | FileCheck %s
-; RUN: opt -S -newgvn < %s | FileCheck %s
-; RUN: opt -S -O3 < %s | FileCheck %s
-
-; These tests checks if passes with CSE functionality can do CSE on
-; invariant.group.barrier, that is prohibited if there is a memory clobber
-; between barriers call.
-
-; CHECK-LABEL: define i8 @optimizable()
-define i8 @optimizable() {
-entry:
- %ptr = alloca i8
- store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; FIXME: This one could be CSE
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
- call void @clobber(i8* %ptr)
-
-; CHECK: call void @use(i8* {{.*}}%ptr2)
- call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
- call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
- %v = load i8, i8* %ptr3, !invariant.group !0
-
- ret i8 %v
-}
-
-; CHECK-LABEL: define i8 @unoptimizable()
-define i8 @unoptimizable() {
-entry:
- %ptr = alloca i8
- store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
- call void @clobber(i8* %ptr)
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
- call void @clobber(i8* %ptr)
-; CHECK: call void @use(i8* {{.*}}%ptr2)
- call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
- call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
- %v = load i8, i8* %ptr3, !invariant.group !0
-
- ret i8 %v
-}
-
-; CHECK-LABEL: define i8 @unoptimizable2()
-define i8 @unoptimizable2() {
- %ptr = alloca i8
- store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
- store i8 43, i8* %ptr
-; CHECK: call i8* @llvm.invariant.group.barrier
- %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
- call void @clobber(i8* %ptr)
-; CHECK: call void @use(i8* {{.*}}%ptr2)
- call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
- call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
- %v = load i8, i8* %ptr3, !invariant.group !0
- ret i8 %v
-}
-
-declare void @use(i8* readonly)
-
-declare void @clobber(i8*)
-; CHECK: Function Attrs: inaccessiblememonly nounwind{{$}}
-; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
-
-!0 = !{}
-
Added: llvm/trunk/test/Other/launder.invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Other/launder.invariant.group.ll?rev=331448&view=auto
==============================================================================
--- llvm/trunk/test/Other/launder.invariant.group.ll (added)
+++ llvm/trunk/test/Other/launder.invariant.group.ll Thu May 3 04:03:01 2018
@@ -0,0 +1,94 @@
+; RUN: opt -S -early-cse < %s | FileCheck %s
+; RUN: opt -S -gvn < %s | FileCheck %s
+; RUN: opt -S -newgvn < %s | FileCheck %s
+; RUN: opt -S -O3 < %s | FileCheck %s
+
+; These tests checks if passes with CSE functionality can do CSE on
+; launder.invariant.group, that is prohibited if there is a memory clobber
+; between barriers call.
+
+; CHECK-LABEL: define i8 @optimizable()
+define i8 @optimizable() {
+entry:
+ %ptr = alloca i8
+ store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; FIXME: This one could be CSE
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+ call void @clobber(i8* %ptr)
+
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+ call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+ call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+ %v = load i8, i8* %ptr3, !invariant.group !0
+
+ ret i8 %v
+}
+
+; CHECK-LABEL: define i8 @unoptimizable()
+define i8 @unoptimizable() {
+entry:
+ %ptr = alloca i8
+ store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+ call void @clobber(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+ call void @clobber(i8* %ptr)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+ call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+ call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+ %v = load i8, i8* %ptr3, !invariant.group !0
+
+ ret i8 %v
+}
+
+; CHECK-LABEL: define i8 @unoptimizable2()
+define i8 @unoptimizable2() {
+ %ptr = alloca i8
+ store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+ store i8 43, i8* %ptr
+; CHECK: call i8* @llvm.launder.invariant.group
+ %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+ call void @clobber(i8* %ptr)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+ call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+ call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+ %v = load i8, i8* %ptr3, !invariant.group !0
+ ret i8 %v
+}
+
+; This test check if optimizer is not proving equality based on mustalias
+; CHECK-LABEL: define void @dontProveEquality(i8* %a)
+define void @dontProveEquality(i8* %a) {
+ %b = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+ %r = icmp eq i8* %b, %a
+;CHECK: call void @useBool(i1 %r)
+ call void @useBool(i1 %r)
+ ret void
+}
+
+declare void @use(i8* readonly)
+declare void @useBool(i1)
+
+declare void @clobber(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable{{$}}
+; CHECK-NEXT: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+
+!0 = !{}
+
Modified: llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/CodeGenPrepare/invariant.group.ll Thu May 3 04:03:01 2018
@@ -6,10 +6,10 @@
define void @foo() {
enter:
; CHECK-NOT: !invariant.group
- ; CHECK-NOT: @llvm.invariant.group.barrier.p0i8(
+ ; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
; CHECK: %val = load i8, i8* @tmp, !tbaa
%val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0}
- %ptr = call i8* @llvm.invariant.group.barrier.p0i8(i8* @tmp)
+ %ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
; CHECK: store i8 42, i8* @tmp
store i8 42, i8* %ptr, !invariant.group !0
@@ -18,7 +18,7 @@ enter:
}
; CHECK-LABEL: }
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
!0 = !{!"something"}
!1 = !{!"x", !0}
Modified: llvm/trunk/test/Transforms/GVN/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/invariant.group.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/GVN/invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/GVN/invariant.group.ll Thu May 3 04:03:01 2018
@@ -25,7 +25,7 @@ define i8 @optimizable1() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr, !invariant.group !0
call void @foo(i8* %ptr2); call to use %ptr2
@@ -242,7 +242,7 @@ define i8 @optimizable4() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%a = load i8, i8* %ptr2, !invariant.group !0
@@ -314,7 +314,7 @@ entry:
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0
- %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue
@@ -441,7 +441,7 @@ declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0
Modified: llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll (original)
+++ llvm/trunk/test/Transforms/GlobalOpt/invariant.group.barrier.ll Thu May 3 04:03:01 2018
@@ -33,7 +33,7 @@ enter:
store i32 %val, i32* %valptr
%0 = bitcast i32* %valptr to i8*
- %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+ %barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
%val2 = load i32, i32* %1
@@ -41,7 +41,7 @@ enter:
ret void
}
-; We can't step through invariant.group.barrier here, because that would change
+; We can't step through launder.invariant.group here, because that would change
; this load in @usage_of_globals()
; val = load i32, i32* %ptrVal, !invariant.group !0
; into
@@ -54,7 +54,7 @@ enter:
store i32 13, i32* @tmp3, !invariant.group !0
%0 = bitcast i32* @tmp3 to i8*
- %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+ %barr = call i8* @llvm.launder.invariant.group(i8* %0)
%1 = bitcast i8* %barr to i32*
store i32* %1, i32** @ptrToTmp3
@@ -74,6 +74,6 @@ entry:
declare void @changeTmp3ValAndCallBarrierInside()
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
!0 = !{!"something"}
Modified: llvm/trunk/test/Transforms/NewGVN/invariant.group.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/NewGVN/invariant.group.ll?rev=331448&r1=331447&r2=331448&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/NewGVN/invariant.group.ll (original)
+++ llvm/trunk/test/Transforms/NewGVN/invariant.group.ll Thu May 3 04:03:01 2018
@@ -26,7 +26,7 @@ define i8 @optimizable1() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr, !invariant.group !0
call void @foo(i8* %ptr2); call to use %ptr2
@@ -243,8 +243,7 @@ define i8 @optimizable4() {
entry:
%ptr = alloca i8
store i8 42, i8* %ptr, !invariant.group !0
- %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK-NOT: load
+ %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
%a = load i8, i8* %ptr2, !invariant.group !0
; CHECK: ret i8 42
@@ -315,7 +314,7 @@ entry:
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
store i8 %unknownValue, i8* %ptr, !invariant.group !0
- %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+ %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
%d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue
@@ -442,7 +441,7 @@ declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0
More information about the llvm-commits
mailing list