<div dir="ltr">I've temporarily reverted this again as it's causing fairly widespread performance issues. The commit here has a link to the bug.<div></div><br><div>echristo@athyra ~/s/llvm-project> git push<br>To github.com:llvm/llvm-project.git<br> 8f0a8ed44e2..7bfaa400863 master -> master<br><br>Sorry for any inconvenience.</div><div><br></div><div>-eric<br><div><div><br></div><div><br></div></div></div></div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Mon, Jul 13, 2020 at 4:06 PM via llvm-commits <<a href="mailto:llvm-commits@lists.llvm.org">llvm-commits@lists.llvm.org</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0px 0px 0px 0.8ex;border-left:1px solid rgb(204,204,204);padding-left:1ex"><br>
Author: Tyker<br>
Date: 2020-07-14T01:05:58+02:00<br>
New Revision: 8d09f20798ac180b1749276bff364682ce0196ab<br>
<br>
URL: <a href="https://github.com/llvm/llvm-project/commit/8d09f20798ac180b1749276bff364682ce0196ab" rel="noreferrer" target="_blank">https://github.com/llvm/llvm-project/commit/8d09f20798ac180b1749276bff364682ce0196ab</a><br>
DIFF: <a href="https://github.com/llvm/llvm-project/commit/8d09f20798ac180b1749276bff364682ce0196ab.diff" rel="noreferrer" target="_blank">https://github.com/llvm/llvm-project/commit/8d09f20798ac180b1749276bff364682ce0196ab.diff</a><br>
<br>
LOG: [AssumeBundles] Use operand bundles to encode alignment assumptions<br>
<br>
Summary:<br>
NOTE: There is a mailing list discussion on this: <a href="http://lists.llvm.org/pipermail/llvm-dev/2019-December/137632.html" rel="noreferrer" target="_blank">http://lists.llvm.org/pipermail/llvm-dev/2019-December/137632.html</a><br>
<br>
Complementary to the assumption outliner prototype in D71692, this patch<br>
shows how we could simplify the code emitted for an alignment<br>
assumption. The generated code is smaller, less fragile, and it makes it<br>
easier to recognize the additional use as an "assumption use".<br>
<br>
As mentioned in D71692 and on the mailing list, we could adopt this<br>
scheme, and similar schemes for other patterns, without adopting the<br>
assumption outlining.<br>
<br>
Reviewers: hfinkel, xbolva00, lebedev.ri, nikic, rjmccall, spatel, jdoerfert, sstefan1<br>
<br>
Reviewed By: jdoerfert<br>
<br>
Subscribers: thopre, yamauchi, kuter, fhahn, merge_guards_bot, hiraditya, bollu, rkruppe, cfe-commits, llvm-commits<br>
<br>
Tags: #clang, #llvm<br>
<br>
Differential Revision: <a href="https://reviews.llvm.org/D71739" rel="noreferrer" target="_blank">https://reviews.llvm.org/D71739</a><br>
<br>
Added: <br>
<br>
<br>
Modified: <br>
clang/lib/CodeGen/CodeGenFunction.cpp<br>
clang/test/CodeGen/align_value.cpp<br>
clang/test/CodeGen/alloc-align-attr.c<br>
clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c<br>
clang/test/CodeGen/builtin-align-array.c<br>
clang/test/CodeGen/builtin-align.c<br>
clang/test/CodeGen/builtin-assume-aligned.c<br>
clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp<br>
clang/test/CodeGen/catch-alignment-assumption-openmp.cpp<br>
clang/test/CodeGen/non-power-of-2-alignment-assumptions.c<br>
clang/test/OpenMP/simd_codegen.cpp<br>
clang/test/OpenMP/simd_metadata.c<br>
clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp<br>
llvm/include/llvm/IR/IRBuilder.h<br>
llvm/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h<br>
llvm/lib/Analysis/AssumeBundleQueries.cpp<br>
llvm/lib/IR/IRBuilder.cpp<br>
llvm/lib/IR/Verifier.cpp<br>
llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp<br>
llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp<br>
llvm/test/Transforms/AlignmentFromAssumptions/simple.ll<br>
llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll<br>
llvm/test/Transforms/Inline/align.ll<br>
llvm/test/Transforms/InstCombine/assume.ll<br>
llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll<br>
llvm/test/Verifier/assume-bundles.ll<br>
llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp<br>
<br>
Removed: <br>
<br>
<br>
<br>
################################################################################<br>
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp<br>
index 8ce488f35dd3..4a7c84562dee 100644<br>
--- a/clang/lib/CodeGen/CodeGenFunction.cpp<br>
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp<br>
@@ -2154,13 +2154,39 @@ void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,<br>
SourceLocation AssumptionLoc,<br>
llvm::Value *Alignment,<br>
llvm::Value *OffsetValue) {<br>
- llvm::Value *TheCheck;<br>
- llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(<br>
- CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);<br>
+ if (Alignment->getType() != IntPtrTy)<br>
+ Alignment =<br>
+ Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");<br>
+ if (OffsetValue && OffsetValue->getType() != IntPtrTy)<br>
+ OffsetValue =<br>
+ Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");<br>
+ llvm::Value *TheCheck = nullptr;<br>
if (SanOpts.has(SanitizerKind::Alignment)) {<br>
- emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,<br>
- OffsetValue, TheCheck, Assumption);<br>
+ llvm::Value *PtrIntValue =<br>
+ Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");<br>
+<br>
+ if (OffsetValue) {<br>
+ bool IsOffsetZero = false;<br>
+ if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))<br>
+ IsOffsetZero = CI->isZero();<br>
+<br>
+ if (!IsOffsetZero)<br>
+ PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");<br>
+ }<br>
+<br>
+ llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);<br>
+ llvm::Value *Mask =<br>
+ Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));<br>
+ llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");<br>
+ TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");<br>
}<br>
+ llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(<br>
+ CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);<br>
+<br>
+ if (!SanOpts.has(SanitizerKind::Alignment))<br>
+ return;<br>
+ emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,<br>
+ OffsetValue, TheCheck, Assumption);<br>
}<br>
<br>
void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,<br>
<br>
diff --git a/clang/test/CodeGen/align_value.cpp b/clang/test/CodeGen/align_value.cpp<br>
index acbfbaf2ba5c..a18cb651fe4c 100644<br>
--- a/clang/test/CodeGen/align_value.cpp<br>
+++ b/clang/test/CodeGen/align_value.cpp<br>
@@ -29,10 +29,7 @@ struct ad_struct {<br>
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8<br>
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0<br>
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]<br>
// CHECK-NEXT: ret double* [[TMP1]]<br>
//<br>
double *foo(ad_struct& x) {<br>
@@ -48,10 +45,7 @@ double *foo(ad_struct& x) {<br>
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.ad_struct*, %struct.ad_struct** [[X_ADDR]], align 8<br>
// CHECK-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_AD_STRUCT:%.*]], %struct.ad_struct* [[TMP0]], i32 0, i32 0<br>
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[A]], align 8<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]<br>
// CHECK-NEXT: ret double* [[TMP1]]<br>
//<br>
double *goo(ad_struct *x) {<br>
@@ -66,10 +60,7 @@ double *goo(ad_struct *x) {<br>
// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]<br>
// CHECK-NEXT: ret double* [[TMP1]]<br>
//<br>
double *bar(aligned_double *x) {<br>
@@ -84,10 +75,7 @@ double *bar(aligned_double *x) {<br>
// CHECK-NEXT: store double** [[X]], double*** [[X_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[TMP0]], align 8<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]<br>
// CHECK-NEXT: ret double* [[TMP1]]<br>
//<br>
double *car(aligned_double &x) {<br>
@@ -103,10 +91,7 @@ double *car(aligned_double &x) {<br>
// CHECK-NEXT: [[TMP0:%.*]] = load double**, double*** [[X_ADDR]], align 8<br>
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds double*, double** [[TMP0]], i64 5<br>
// CHECK-NEXT: [[TMP1:%.*]] = load double*, double** [[ARRAYIDX]], align 8<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[TMP1]], i64 64) ]<br>
// CHECK-NEXT: ret double* [[TMP1]]<br>
//<br>
double *dar(aligned_double *x) {<br>
@@ -118,10 +103,7 @@ aligned_double eep();<br>
// CHECK-LABEL: define {{[^@]+}}@_Z3retv() #0<br>
// CHECK-NEXT: entry:<br>
// CHECK-NEXT: [[CALL:%.*]] = call double* @_Z3eepv()<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint double* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[CALL]], i64 64) ]<br>
// CHECK-NEXT: ret double* [[CALL]]<br>
//<br>
double *ret() {<br>
<br>
diff --git a/clang/test/CodeGen/alloc-align-attr.c b/clang/test/CodeGen/alloc-align-attr.c<br>
index 9517c50dbb1d..44a57291b47c 100644<br>
--- a/clang/test/CodeGen/alloc-align-attr.c<br>
+++ b/clang/test/CodeGen/alloc-align-attr.c<br>
@@ -11,12 +11,8 @@ __INT32_TYPE__*m1(__INT32_TYPE__ i) __attribute__((alloc_align(1)));<br>
// CHECK-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[TMP0]])<br>
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[TMP0]] to i64<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]<br>
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4<br>
// CHECK-NEXT: ret i32 [[TMP1]]<br>
//<br>
@@ -32,12 +28,8 @@ __INT32_TYPE__ test1(__INT32_TYPE__ a) {<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[TMP0]] to i32<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m1(i32 [[CONV]])<br>
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[CONV]] to i64<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = zext i32 [[CONV]] to i64<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]<br>
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4<br>
// CHECK-NEXT: ret i32 [[TMP1]]<br>
//<br>
@@ -55,11 +47,7 @@ __INT32_TYPE__ *m2(__SIZE_TYPE__ i) __attribute__((alloc_align(1)));<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4<br>
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP0]] to i64<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[CONV]])<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[CONV]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CONV]]) ]<br>
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4<br>
// CHECK-NEXT: ret i32 [[TMP1]]<br>
//<br>
@@ -75,11 +63,7 @@ __INT32_TYPE__ test3(__INT32_TYPE__ a) {<br>
// CHECK-NEXT: store i64 [[A]], i64* [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i64, i64* [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m2(i64 [[TMP0]])<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[TMP0]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[TMP0]]) ]<br>
// CHECK-NEXT: [[TMP1:%.*]] = load i32, i32* [[CALL]], align 4<br>
// CHECK-NEXT: ret i32 [[TMP1]]<br>
//<br>
@@ -115,12 +99,8 @@ __INT32_TYPE__ *m3(struct Empty s, __int128_t i) __attribute__((alloc_align(2)))<br>
// CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP4]], i32 0, i32 1<br>
// CHECK-NEXT: [[TMP8:%.*]] = load i64, i64* [[TMP7]], align 8<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m3(i64 [[TMP6]], i64 [[TMP8]])<br>
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]<br>
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[CALL]], align 4<br>
// CHECK-NEXT: ret i32 [[TMP9]]<br>
//<br>
@@ -157,12 +137,8 @@ __INT32_TYPE__ *m4(struct MultiArgs s, __int128_t i) __attribute__((alloc_align(<br>
// CHECK-NEXT: [[TMP12:%.*]] = getelementptr inbounds { i64, i64 }, { i64, i64 }* [[TMP9]], i32 0, i32 1<br>
// CHECK-NEXT: [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32* @m4(i64 [[TMP6]], i64 [[TMP8]], i64 [[TMP11]], i64 [[TMP13]])<br>
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = trunc i128 [[TMP3]] to i64<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: [[CASTED_ALIGN:%.*]] = trunc i128 [[TMP3]] to i64<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 [[CASTED_ALIGN]]) ]<br>
// CHECK-NEXT: [[TMP14:%.*]] = load i32, i32* [[CALL]], align 4<br>
// CHECK-NEXT: ret i32 [[TMP14]]<br>
//<br>
<br>
diff --git a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c<br>
index fa4ee8db12e7..cd8a6f19b4f4 100644<br>
--- a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c<br>
+++ b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c<br>
@@ -36,12 +36,8 @@ void *t2_immediate2() {<br>
// CHECK-NEXT: store i32 [[ALIGNMENT:%.*]], i32* [[ALIGNMENT_ADDR]], align 4<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGNMENT_ADDR]], align 4<br>
// CHECK-NEXT: [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 [[TMP0]])<br>
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ]<br>
// CHECK-NEXT: ret i8* [[CALL]]<br>
//<br>
void *t3_variable(int alignment) {<br>
<br>
diff --git a/clang/test/CodeGen/builtin-align-array.c b/clang/test/CodeGen/builtin-align-array.c<br>
index 97235c33b7fb..31f7b42b5617 100644<br>
--- a/clang/test/CodeGen/builtin-align-array.c<br>
+++ b/clang/test/CodeGen/builtin-align-array.c<br>
@@ -4,7 +4,7 @@<br>
<br>
extern int func(char *c);<br>
<br>
-// CHECK-LABEL: define {{[^@]+}}@test_array() #0<br>
+// CHECK-LABEL: @test_array(<br>
// CHECK-NEXT: entry:<br>
// CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 16<br>
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 44<br>
@@ -12,10 +12,7 @@ extern int func(char *c);<br>
// CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16<br>
// CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]<br>
// CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])<br>
// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 22<br>
// CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64<br>
@@ -23,13 +20,10 @@ extern int func(char *c);<br>
// CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32<br>
// CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]<br>
// CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]<br>
-// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31<br>
-// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])<br>
-// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])<br>
-// CHECK-NEXT: [[ARRAYIDX11:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16<br>
-// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX11]] to i64<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]<br>
+// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])<br>
+// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 16<br>
+// CHECK-NEXT: [[SRC_ADDR:%.*]] = ptrtoint i8* [[ARRAYIDX8]] to i64<br>
// CHECK-NEXT: [[SET_BITS:%.*]] = and i64 [[SRC_ADDR]], 63<br>
// CHECK-NEXT: [[IS_ALIGNED:%.*]] = icmp eq i64 [[SET_BITS]], 0<br>
// CHECK-NEXT: [[CONV:%.*]] = zext i1 [[IS_ALIGNED]] to i32<br>
@@ -42,7 +36,7 @@ int test_array(void) {<br>
return __builtin_is_aligned(&buf[16], 64);<br>
}<br>
<br>
-// CHECK-LABEL: define {{[^@]+}}@test_array_should_not_mask() #0<br>
+// CHECK-LABEL: @test_array_should_not_mask(<br>
// CHECK-NEXT: entry:<br>
// CHECK-NEXT: [[BUF:%.*]] = alloca [1024 x i8], align 32<br>
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 64<br>
@@ -50,10 +44,7 @@ int test_array(void) {<br>
// CHECK-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], -16<br>
// CHECK-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]<br>
// CHECK-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX]], i64 [[DIFF]]<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 16) ]<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT]])<br>
// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds [1024 x i8], [1024 x i8]* [[BUF]], i64 0, i64 32<br>
// CHECK-NEXT: [[INTPTR2:%.*]] = ptrtoint i8* [[ARRAYIDX1]] to i64<br>
@@ -61,11 +52,8 @@ int test_array(void) {<br>
// CHECK-NEXT: [[ALIGNED_INTPTR4:%.*]] = and i64 [[OVER_BOUNDARY]], -32<br>
// CHECK-NEXT: [[DIFF5:%.*]] = sub i64 [[ALIGNED_INTPTR4]], [[INTPTR2]]<br>
// CHECK-NEXT: [[ALIGNED_RESULT6:%.*]] = getelementptr inbounds i8, i8* [[ARRAYIDX1]], i64 [[DIFF5]]<br>
-// CHECK-NEXT: [[PTRINT7:%.*]] = ptrtoint i8* [[ALIGNED_RESULT6]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR8:%.*]] = and i64 [[PTRINT7]], 31<br>
-// CHECK-NEXT: [[MASKCOND9:%.*]] = icmp eq i64 [[MASKEDPTR8]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND9]])<br>
-// CHECK-NEXT: [[CALL10:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT6]], i64 32) ]<br>
+// CHECK-NEXT: [[CALL7:%.*]] = call i32 @func(i8* [[ALIGNED_RESULT6]])<br>
// CHECK-NEXT: ret i32 1<br>
//<br>
int test_array_should_not_mask(void) {<br>
<br>
diff --git a/clang/test/CodeGen/builtin-align.c b/clang/test/CodeGen/builtin-align.c<br>
index 7e66e2b5c0b9..60f7fc99c1d4 100644<br>
--- a/clang/test/CodeGen/builtin-align.c<br>
+++ b/clang/test/CodeGen/builtin-align.c<br>
@@ -122,11 +122,7 @@ _Bool is_aligned(TYPE ptr, unsigned align) {<br>
// CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[OVER_BOUNDARY]], [[INVERTED_MASK]]<br>
// CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]<br>
// CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]<br>
-// CHECK-VOID_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1<br>
-// CHECK-VOID_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64<br>
-// CHECK-VOID_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]<br>
-// CHECK-VOID_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]<br>
// CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]<br>
//<br>
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_up<br>
@@ -142,11 +138,7 @@ _Bool is_aligned(TYPE ptr, unsigned align) {<br>
// CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*<br>
// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]<br>
// CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*<br>
-// CHECK-FLOAT_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1<br>
-// CHECK-FLOAT_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64<br>
-// CHECK-FLOAT_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]<br>
-// CHECK-FLOAT_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]<br>
// CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]<br>
//<br>
// CHECK-LONG-LABEL: define {{[^@]+}}@align_up<br>
@@ -184,11 +176,7 @@ TYPE align_up(TYPE ptr, unsigned align) {<br>
// CHECK-VOID_PTR-NEXT: [[ALIGNED_INTPTR:%.*]] = and i64 [[INTPTR]], [[INVERTED_MASK]]<br>
// CHECK-VOID_PTR-NEXT: [[DIFF:%.*]] = sub i64 [[ALIGNED_INTPTR]], [[INTPTR]]<br>
// CHECK-VOID_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[PTR]], i64 [[DIFF]]<br>
-// CHECK-VOID_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1<br>
-// CHECK-VOID_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[ALIGNED_RESULT]] to i64<br>
-// CHECK-VOID_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]<br>
-// CHECK-VOID_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-VOID_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[ALIGNED_RESULT]], i64 [[ALIGNMENT]]) ]<br>
// CHECK-VOID_PTR-NEXT: ret i8* [[ALIGNED_RESULT]]<br>
//<br>
// CHECK-FLOAT_PTR-LABEL: define {{[^@]+}}@align_down<br>
@@ -203,11 +191,7 @@ TYPE align_up(TYPE ptr, unsigned align) {<br>
// CHECK-FLOAT_PTR-NEXT: [[TMP0:%.*]] = bitcast float* [[PTR]] to i8*<br>
// CHECK-FLOAT_PTR-NEXT: [[ALIGNED_RESULT:%.*]] = getelementptr inbounds i8, i8* [[TMP0]], i64 [[DIFF]]<br>
// CHECK-FLOAT_PTR-NEXT: [[TMP1:%.*]] = bitcast i8* [[ALIGNED_RESULT]] to float*<br>
-// CHECK-FLOAT_PTR-NEXT: [[MASK1:%.*]] = sub i64 [[ALIGNMENT]], 1<br>
-// CHECK-FLOAT_PTR-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[TMP1]] to i64<br>
-// CHECK-FLOAT_PTR-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK1]]<br>
-// CHECK-FLOAT_PTR-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-FLOAT_PTR-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[TMP1]], i64 [[ALIGNMENT]]) ]<br>
// CHECK-FLOAT_PTR-NEXT: ret float* [[TMP1]]<br>
//<br>
// CHECK-LONG-LABEL: define {{[^@]+}}@align_down<br>
<br>
diff --git a/clang/test/CodeGen/builtin-assume-aligned.c b/clang/test/CodeGen/builtin-assume-aligned.c<br>
index 90693cc21520..b9f1ebfbdcf5 100644<br>
--- a/clang/test/CodeGen/builtin-assume-aligned.c<br>
+++ b/clang/test/CodeGen/builtin-assume-aligned.c<br>
@@ -8,10 +8,7 @@<br>
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]<br>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*<br>
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
@@ -31,10 +28,7 @@ int test1(int *a) {<br>
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 0) ]<br>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*<br>
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
@@ -54,10 +48,7 @@ int test2(int *a) {<br>
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32) ]<br>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*<br>
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
@@ -81,11 +72,7 @@ int test3(int *a) {<br>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*<br>
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[B_ADDR]], align 4<br>
// CHECK-NEXT: [[CONV:%.*]] = sext i32 [[TMP2]] to i64<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], [[CONV]]<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 32, i64 [[CONV]]) ]<br>
// CHECK-NEXT: [[TMP3:%.*]] = bitcast i8* [[TMP1]] to i32*<br>
// CHECK-NEXT: store i32* [[TMP3]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
@@ -115,11 +102,7 @@ int *m2() __attribute__((assume_aligned(64, 12)));<br>
// CHECK-LABEL: define {{[^@]+}}@test6() #0<br>
// CHECK-NEXT: entry:<br>
// CHECK-NEXT: [[CALL:%.*]] = call i32* (...) @m2()<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[CALL]] to i64<br>
-// CHECK-NEXT: [[OFFSETPTR:%.*]] = sub i64 [[PTRINT]], 12<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 63<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[CALL]], i64 64, i64 12) ]<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[CALL]], align 4<br>
// CHECK-NEXT: ret i32 [[TMP0]]<br>
//<br>
@@ -134,10 +117,7 @@ int test6() {<br>
// CHECK-NEXT: store i32* [[A]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[TMP0]] to i8*<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[TMP1]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 536870911<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[TMP1]], i64 536870912) ]<br>
// CHECK-NEXT: [[TMP2:%.*]] = bitcast i8* [[TMP1]] to i32*<br>
// CHECK-NEXT: store i32* [[TMP2]], i32** [[A_ADDR]], align 8<br>
// CHECK-NEXT: [[TMP3:%.*]] = load i32*, i32** [[A_ADDR]], align 8<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp<br>
index 96d264190bec..fb2b1a76116e 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-lvalue.cpp<br>
@@ -21,9 +21,9 @@ char **load_from_ac_struct(struct ac_struct *x) {<br>
// CHECK-NEXT: %[[X_RELOADED:.*]] = load %[[STRUCT_AC_STRUCT]]*, %[[STRUCT_AC_STRUCT]]** %[[STRUCT_AC_STRUCT_ADDR]], align 8<br>
// CHECK: %[[A_ADDR:.*]] = getelementptr inbounds %[[STRUCT_AC_STRUCT]], %[[STRUCT_AC_STRUCT]]* %[[X_RELOADED]], i32 0, i32 0<br>
// CHECK: %[[A:.*]] = load i8**, i8*** %[[A_ADDR]], align 8<br>
- // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64<br>
- // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647<br>
- // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
+ // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[A]] to i64<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[A]] to i64, !nosanitize<br>
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize<br>
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:<br>
@@ -32,7 +32,7 @@ char **load_from_ac_struct(struct ac_struct *x) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[A]], i64 2147483648) ]<br>
// CHECK-NEXT: ret i8** %[[A]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp<br>
index 0e3fa750c66c..46f7d09ae2aa 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-align_value-on-paramvar.cpp<br>
@@ -24,7 +24,7 @@ char **passthrough(__attribute__((align_value(0x80000000))) char **x) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RELOADED]], i64 2147483648) ]<br>
// CHECK-NEXT: ret i8** %[[X_RELOADED]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp<br>
index 591eaa0e1313..40abbc387199 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function-variable.cpp<br>
@@ -30,10 +30,10 @@ char **caller(char **x, unsigned long alignment) {<br>
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[ALIGNMENT_RELOADED:.*]] = load i64, i64* %[[ALIGNMENT_ADDR]], align 8<br>
// CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 %[[ALIGNMENT_RELOADED]])<br>
- // CHECK-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1<br>
- // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64<br>
- // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]<br>
- // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
+ // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64<br>
+ // CHECK-SANITIZE-NEXT: %[[MASK:.*]] = sub i64 %[[ALIGNMENT_RELOADED]], 1<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], %[[MASK]]<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize<br>
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize<br>
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:<br>
@@ -42,7 +42,7 @@ char **caller(char **x, unsigned long alignment) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
 - // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
 + // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 %[[ALIGNMENT_RELOADED]]) ]<br>
// CHECK-NEXT: ret i8** %[[X_RETURNED]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp<br>
index a41357933f91..87d903c69716 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp<br>
@@ -39,7 +39,7 @@ char **caller(char **x) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]<br>
// CHECK-NEXT: ret i8** %[[X_RETURNED]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp<br>
index e78667ce16e0..ecc96bcf6a53 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function-two-params.cpp<br>
@@ -24,10 +24,10 @@ char **caller(char **x) {<br>
// CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]])<br>
- // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64<br>
- // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42<br>
- // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647<br>
- // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
+ // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64<br>
+ // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 2147483647<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize<br>
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize<br>
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:<br>
@@ -36,7 +36,7 @@ char **caller(char **x) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 2147483648, i64 42) ]<br>
// CHECK-NEXT: ret i8** %[[X_RETURNED]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp<br>
index f750bbd77d42..5bbc5843b89f 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-assume_aligned-on-function.cpp<br>
@@ -36,7 +36,7 @@ char **caller(char **x) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 true) [ "align"(i8** %[[X_RETURNED]], i64 128) ]<br>
// CHECK-NEXT: ret i8** %[[X_RETURNED]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp<br>
index 4306e322f5fb..9c8944ba280b 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params-variable.cpp<br>
@@ -16,10 +16,10 @@ void *caller(char **x, unsigned long offset) {<br>
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*<br>
// CHECK-NEXT: %[[OFFSET_RELOADED:.*]] = load i64, i64* %[[OFFSET_ADDR]], align 8<br>
- // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64<br>
- // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]<br>
- // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911<br>
- // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
+ // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64<br>
+ // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], %[[OFFSET_RELOADED]]<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize<br>
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize<br>
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:<br>
@@ -28,7 +28,7 @@ void *caller(char **x, unsigned long offset) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 %[[OFFSET_RELOADED]]) ]<br>
// CHECK-NEXT: ret i8* %[[BITCAST]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp<br>
index 27f53e92bed8..9f61e08106a0 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-three-params.cpp<br>
@@ -13,10 +13,10 @@ void *caller(char **x) {<br>
// CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*<br>
- // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64<br>
- // CHECK-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42<br>
- // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911<br>
- // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
+ // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64<br>
+ // CHECK-SANITIZE-NEXT: %[[OFFSETPTR:.*]] = sub i64 %[[PTRINT]], 42<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[OFFSETPTR]], 536870911<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize<br>
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize<br>
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:<br>
@@ -25,7 +25,7 @@ void *caller(char **x) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912, i64 42) ]<br>
// CHECK-NEXT: ret i8* %[[BITCAST]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp<br>
index 5412270f3761..20bed646ff95 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-builtin_assume_aligned-two-params.cpp<br>
@@ -13,9 +13,9 @@ void *caller(char **x) {<br>
// CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8<br>
// CHECK-NEXT: %[[BITCAST:.*]] = bitcast i8** %[[X_RELOADED]] to i8*<br>
- // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64<br>
- // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911<br>
- // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
+ // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[BITCAST]] to i64<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 536870911<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[BITCAST]] to i64, !nosanitize<br>
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize<br>
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:<br>
@@ -24,7 +24,7 @@ void *caller(char **x) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[BITCAST]], i64 536870912) ]<br>
// CHECK-NEXT: ret i8* %[[BITCAST]]<br>
// CHECK-NEXT: }<br>
#line 100<br>
<br>
diff --git a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp<br>
index 6d75ee0858da..353f2fd7f17b 100644<br>
--- a/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp<br>
+++ b/clang/test/CodeGen/catch-alignment-assumption-openmp.cpp<br>
@@ -12,9 +12,9 @@ void func(char *data) {<br>
// CHECK-NEXT: %[[DATA_ADDR:.*]] = alloca i8*, align 8<br>
// CHECK: store i8* %[[DATA]], i8** %[[DATA_ADDR]], align 8<br>
// CHECK: %[[DATA_RELOADED:.*]] = load i8*, i8** %[[DATA_ADDR]], align 8<br>
- // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64<br>
- // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823<br>
- // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
+ // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 1073741823<br>
+ // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0<br>
// CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8* %[[DATA_RELOADED]] to i64, !nosanitize<br>
// CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize<br>
// CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:<br>
@@ -23,7 +23,7 @@ void func(char *data) {<br>
// CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize<br>
// CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize<br>
// CHECK-SANITIZE: [[CONT]]:<br>
- // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])<br>
+ // CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* %[[DATA_RELOADED]], i64 1073741824) ]<br>
<br>
#line 100<br>
#pragma omp for simd aligned(data : 0x40000000)<br>
<br>
diff --git a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c<br>
index 9467f6228dfc..b8ce1699f7ed 100644<br>
--- a/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c<br>
+++ b/clang/test/CodeGen/non-power-of-2-alignment-assumptions.c<br>
@@ -9,12 +9,8 @@ void *__attribute__((alloc_align(1))) alloc(int align);<br>
// CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4<br>
// CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ALIGN_ADDR]], align 4<br>
// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 [[TMP0]])<br>
-// CHECK-NEXT: [[ALIGNMENTCAST:%.*]] = zext i32 [[TMP0]] to i64<br>
-// CHECK-NEXT: [[MASK:%.*]] = sub i64 [[ALIGNMENTCAST]], 1<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], [[MASK]]<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: [[TMP1:%.*]] = zext i32 [[TMP0]] to i64<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 [[TMP1]]) ]<br>
// CHECK-NEXT: ret void<br>
//<br>
void t0(int align) {<br>
@@ -25,10 +21,7 @@ void t0(int align) {<br>
// CHECK-NEXT: [[ALIGN_ADDR:%.*]] = alloca i32, align 4<br>
// CHECK-NEXT: store i32 [[ALIGN:%.*]], i32* [[ALIGN_ADDR]], align 4<br>
// CHECK-NEXT: [[CALL:%.*]] = call i8* @alloc(i32 7)<br>
-// CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64<br>
-// CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 6<br>
-// CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+// CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i8* [[CALL]], i64 7) ]<br>
// CHECK-NEXT: ret void<br>
//<br>
void t1(int align) {<br>
<br>
diff --git a/clang/test/OpenMP/simd_codegen.cpp b/clang/test/OpenMP/simd_codegen.cpp<br>
index cb53bb1aa38b..3440225673c4 100644<br>
--- a/clang/test/OpenMP/simd_codegen.cpp<br>
+++ b/clang/test/OpenMP/simd_codegen.cpp<br>
@@ -817,25 +817,9 @@ void parallel_simd(float *a) {<br>
// TERM_DEBUG: !{{[0-9]+}} = !DILocation(line: [[@LINE-11]],<br>
<br>
// CHECK-LABEL: S8<br>
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64<br>
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64<br>
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64<br>
-// CHECK-DAG: ptrtoint [[SS_TY]]* %{{.+}} to i64<br>
-<br>
-// CHECK-DAG: and i64 %{{.+}}, 15<br>
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0<br>
// CHECK-DAG: call void @llvm.assume(i1<br>
-<br>
-// CHECK-DAG: and i64 %{{.+}}, 7<br>
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0<br>
// CHECK-DAG: call void @llvm.assume(i1<br>
-<br>
-// CHECK-DAG: and i64 %{{.+}}, 15<br>
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0<br>
// CHECK-DAG: call void @llvm.assume(i1<br>
-<br>
-// CHECK-DAG: and i64 %{{.+}}, 3<br>
-// CHECK-DAG: icmp eq i64 %{{.+}}, 0<br>
// CHECK-DAG: call void @llvm.assume(i1<br>
struct SS {<br>
SS(): a(0) {}<br>
<br>
diff --git a/clang/test/OpenMP/simd_metadata.c b/clang/test/OpenMP/simd_metadata.c<br>
index f0ae0200dd08..18133e3b6c2e 100644<br>
--- a/clang/test/OpenMP/simd_metadata.c<br>
+++ b/clang/test/OpenMP/simd_metadata.c<br>
@@ -21,30 +21,21 @@ void h1(float *c, float *a, double b[], int size)<br>
// CHECK-LABEL: define void @h1<br>
int t = 0;<br>
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b)<br>
-// CHECK: [[C_PTRINT:%.+]] = ptrtoint<br>
-// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31<br>
-// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])<br>
-// CHECK: [[A_PTRINT:%.+]] = ptrtoint<br>
-<br>
-// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31<br>
-// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63<br>
-// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-<br>
-// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])<br>
-// CHECK: [[B_PTRINT:%.+]] = ptrtoint<br>
-<br>
-// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15<br>
-// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31<br>
-// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63<br>
-// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15<br>
-// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31<br>
-<br>
-// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])<br>
+ // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]<br>
+ // CHECK-NEXT: load<br>
+<br>
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]<br>
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]<br>
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // CHECK-NEXT: load<br>
+<br>
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]<br>
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]<br>
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]<br>
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]<br>
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]<br>
for (int i = 0; i < size; ++i) {<br>
c[i] = a[i] * a[i] + b[i] * b[t];<br>
++t;<br>
@@ -52,30 +43,21 @@ void h1(float *c, float *a, double b[], int size)<br>
// do not emit llvm.access.group metadata due to usage of safelen clause.<br>
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}<br>
#pragma omp simd safelen(16) linear(t) aligned(c:32) aligned(a,b) simdlen(8)<br>
-// CHECK: [[C_PTRINT:%.+]] = ptrtoint<br>
-// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31<br>
-// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])<br>
-// CHECK: [[A_PTRINT:%.+]] = ptrtoint<br>
-<br>
-// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31<br>
-// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63<br>
-// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-<br>
-// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])<br>
-// CHECK: [[B_PTRINT:%.+]] = ptrtoint<br>
-<br>
-// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15<br>
-// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31<br>
-// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63<br>
-// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15<br>
-// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31<br>
-<br>
-// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])<br>
+ // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]<br>
+ // CHECK-NEXT: load<br>
+<br>
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]<br>
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]<br>
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // CHECK-NEXT: load<br>
+<br>
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]<br>
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]<br>
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]<br>
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]<br>
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]<br>
for (int i = 0; i < size; ++i) {<br>
c[i] = a[i] * a[i] + b[i] * b[t];<br>
++t;<br>
@@ -83,30 +65,21 @@ void h1(float *c, float *a, double b[], int size)<br>
// do not emit llvm.access.group metadata due to usage of safelen clause.<br>
// CHECK-NOT: store float {{.+}}, float* {{.+}}, align {{.+}}, !llvm.access.group {{![0-9]+}}<br>
#pragma omp simd linear(t) aligned(c:32) aligned(a,b) simdlen(8)<br>
-// CHECK: [[C_PTRINT:%.+]] = ptrtoint<br>
-// CHECK-NEXT: [[C_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[C_PTRINT]], 31<br>
-// CHECK-NEXT: [[C_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[C_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[C_MASKCOND]])<br>
-// CHECK: [[A_PTRINT:%.+]] = ptrtoint<br>
-<br>
-// X86-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-// X86-AVX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 31<br>
-// X86-AVX512-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 63<br>
-// PPC-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-// PPC-QPX-NEXT: [[A_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[A_PTRINT]], 15<br>
-<br>
-// CHECK-NEXT: [[A_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[A_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[A_MASKCOND]])<br>
-// CHECK: [[B_PTRINT:%.+]] = ptrtoint<br>
-<br>
-// X86-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15<br>
-// X86-AVX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31<br>
-// X86-AVX512-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 63<br>
-// PPC-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 15<br>
-// PPC-QPX-NEXT: [[B_MASKEDPTR:%.+]] = and i{{[0-9]+}} [[B_PTRINT]], 31<br>
-<br>
-// CHECK-NEXT: [[B_MASKCOND:%.+]] = icmp eq i{{[0-9]+}} [[B_MASKEDPTR]], 0<br>
-// CHECK-NEXT: call void @llvm.assume(i1 [[B_MASKCOND]])<br>
+ // CHECK: call void @llvm.assume(i1 true) [ "align"(float* [[PTR4:%.*]], {{i64|i32}} 32) ]<br>
+ // CHECK-NEXT: load<br>
+<br>
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 32) ]<br>
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 64) ]<br>
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[PTR5:%.*]], {{i64|i32}} 16) ]<br>
+ // CHECK-NEXT: load<br>
+<br>
+ // X86-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]<br>
+ // X86-AVX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]<br>
+ // X86-AVX512-NEXT:call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 64) ]<br>
+ // PPC-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 16) ]<br>
+ // PPC-QPX-NEXT: call void @llvm.assume(i1 true) [ "align"(double* [[PTR6:%.*]], {{i64|i32}} 32) ]<br>
for (int i = 0; i < size; ++i) {<br>
c[i] = a[i] * a[i] + b[i] * b[t];<br>
++t;<br>
<br>
diff --git a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp<br>
index 2fc166ed0b87..7192ef454d0a 100644<br>
--- a/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp<br>
+++ b/clang/test/OpenMP/target_teams_distribute_parallel_for_simd_codegen.cpp<br>
@@ -101,10 +101,7 @@ int target_teams_fun(int *g){<br>
<br>
// CK1: define internal void @[[OUTL1]]({{.+}})<br>
// CK1: [[ARRDECAY:%.+]] = getelementptr inbounds [1000 x i32], [1000 x i32]* %{{.+}}, i{{32|64}} 0, i{{32|64}} 0<br>
- // CK1: [[ARR_CAST:%.+]] = ptrtoint i32* [[ARRDECAY]] to i{{32|64}}<br>
- // CK1: [[MASKED_PTR:%.+]] = and i{{32|64}} [[ARR_CAST]], 7<br>
- // CK1: [[COND:%.+]] = icmp eq i{{32|64}} [[MASKED_PTR]], 0<br>
- // CK1: call void @llvm.assume(i1 [[COND]])<br>
+ // CK1: call void @llvm.assume(i1 true) [ "align"(i32* [[ARRDECAY]], {{i64|i32}} 8) ]<br>
// CK1: call void @__kmpc_for_static_init_4(<br>
// CK1: call void {{.+}} @__kmpc_fork_call(<br>
// CK1: call void @__kmpc_for_static_fini(<br>
<br>
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h<br>
index ffec4ff64ca6..4552ca016bd7 100644<br>
--- a/llvm/include/llvm/IR/IRBuilder.h<br>
+++ b/llvm/include/llvm/IR/IRBuilder.h<br>
@@ -782,7 +782,11 @@ class IRBuilderBase {<br>
<br>
/// Create an assume intrinsic call that allows the optimizer to<br>
/// assume that the provided condition will be true.<br>
- CallInst *CreateAssumption(Value *Cond);<br>
+ ///<br>
+ /// The optional argument \p OpBundles specifies operand bundles that are<br>
+ /// added to the call instruction.<br>
+ CallInst *CreateAssumption(Value *Cond,<br>
+ ArrayRef<OperandBundleDef> OpBundles = llvm::None);<br>
<br>
/// Create a call to the experimental.gc.statepoint intrinsic to<br>
/// start a new statepoint sequence.<br>
@@ -2502,13 +2506,11 @@ class IRBuilderBase {<br>
<br>
private:<br>
/// Helper function that creates an assume intrinsic call that<br>
- /// represents an alignment assumption on the provided Ptr, Mask, Type<br>
- /// and Offset. It may be sometimes useful to do some other logic<br>
- /// based on this alignment check, thus it can be stored into 'TheCheck'.<br>
+ /// represents an alignment assumption on the provided pointer \p PtrValue<br>
+ /// with offset \p OffsetValue and alignment value \p AlignValue.<br>
CallInst *CreateAlignmentAssumptionHelper(const DataLayout &DL,<br>
- Value *PtrValue, Value *Mask,<br>
- Type *IntPtrTy, Value *OffsetValue,<br>
- Value **TheCheck);<br>
+ Value *PtrValue, Value *AlignValue,<br>
+ Value *OffsetValue);<br>
<br>
public:<br>
/// Create an assume intrinsic call that represents an alignment<br>
@@ -2517,13 +2519,9 @@ class IRBuilderBase {<br>
/// An optional offset can be provided, and if it is provided, the offset<br>
/// must be subtracted from the provided pointer to get the pointer with the<br>
/// specified alignment.<br>
- ///<br>
- /// It may be sometimes useful to do some other logic<br>
- /// based on this alignment check, thus it can be stored into 'TheCheck'.<br>
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,<br>
unsigned Alignment,<br>
- Value *OffsetValue = nullptr,<br>
- Value **TheCheck = nullptr);<br>
+ Value *OffsetValue = nullptr);<br>
<br>
/// Create an assume intrinsic call that represents an alignment<br>
/// assumption on the provided pointer.<br>
@@ -2532,15 +2530,11 @@ class IRBuilderBase {<br>
/// must be subtracted from the provided pointer to get the pointer with the<br>
/// specified alignment.<br>
///<br>
- /// It may be sometimes useful to do some other logic<br>
- /// based on this alignment check, thus it can be stored into 'TheCheck'.<br>
- ///<br>
/// This overload handles the condition where the Alignment is dependent<br>
/// on an existing value rather than a static value.<br>
CallInst *CreateAlignmentAssumption(const DataLayout &DL, Value *PtrValue,<br>
Value *Alignment,<br>
- Value *OffsetValue = nullptr,<br>
- Value **TheCheck = nullptr);<br>
+ Value *OffsetValue = nullptr);<br>
};<br>
<br>
/// This provides a uniform API for creating instructions and inserting<br>
<br>
diff --git a/llvm/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h b/llvm/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h<br>
index be119b8ab855..10b6e1c6a21b 100644<br>
--- a/llvm/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h<br>
+++ b/llvm/include/llvm/Transforms/Scalar/AlignmentFromAssumptions.h<br>
@@ -37,9 +37,9 @@ struct AlignmentFromAssumptionsPass<br>
ScalarEvolution *SE = nullptr;<br>
DominatorTree *DT = nullptr;<br>
<br>
- bool extractAlignmentInfo(CallInst *I, Value *&AAPtr, const SCEV *&AlignSCEV,<br>
- const SCEV *&OffSCEV);<br>
- bool processAssumption(CallInst *I);<br>
+ bool extractAlignmentInfo(CallInst *I, unsigned Idx, Value *&AAPtr,<br>
+ const SCEV *&AlignSCEV, const SCEV *&OffSCEV);<br>
+ bool processAssumption(CallInst *I, unsigned Idx);<br>
};<br>
}<br>
<br>
<br>
diff --git a/llvm/lib/Analysis/AssumeBundleQueries.cpp b/llvm/lib/Analysis/AssumeBundleQueries.cpp<br>
index e9da1e607b45..af81216f6526 100644<br>
--- a/llvm/lib/Analysis/AssumeBundleQueries.cpp<br>
+++ b/llvm/lib/Analysis/AssumeBundleQueries.cpp<br>
@@ -96,10 +96,17 @@ llvm::getKnowledgeFromBundle(CallInst &Assume,<br>
Result.AttrKind = Attribute::getAttrKindFromName(BOI.Tag->getKey());<br>
if (bundleHasArgument(BOI, ABA_WasOn))<br>
Result.WasOn = getValueFromBundleOpInfo(Assume, BOI, ABA_WasOn);<br>
+ auto GetArgOr1 = [&](unsigned Idx) -> unsigned {<br>
+ if (auto *ConstInt = dyn_cast<ConstantInt>(<br>
+ getValueFromBundleOpInfo(Assume, BOI, ABA_Argument + Idx)))<br>
+ return ConstInt->getZExtValue();<br>
+ return 1;<br>
+ };<br>
if (BOI.End - BOI.Begin > ABA_Argument)<br>
- Result.ArgValue =<br>
- cast<ConstantInt>(getValueFromBundleOpInfo(Assume, BOI, ABA_Argument))<br>
- ->getZExtValue();<br>
+ Result.ArgValue = GetArgOr1(0);<br>
+ if (Result.AttrKind == Attribute::Alignment)<br>
+ if (BOI.End - BOI.Begin > ABA_Argument + 1)<br>
+ Result.ArgValue = MinAlign(Result.ArgValue, GetArgOr1(1));<br>
return Result;<br>
}<br>
<br>
<br>
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp<br>
index 1fffce015f70..b87dfe1c8df6 100644<br>
--- a/llvm/lib/IR/IRBuilder.cpp<br>
+++ b/llvm/lib/IR/IRBuilder.cpp<br>
@@ -71,8 +71,9 @@ Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {<br>
static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,<br>
IRBuilderBase *Builder,<br>
const Twine &Name = "",<br>
- Instruction *FMFSource = nullptr) {<br>
- CallInst *CI = Builder->CreateCall(Callee, Ops, Name);<br>
+ Instruction *FMFSource = nullptr,<br>
+ ArrayRef<OperandBundleDef> OpBundles = {}) {<br>
+ CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);<br>
if (FMFSource)<br>
CI->copyFastMathFlags(FMFSource);<br>
return CI;<br>
@@ -449,14 +450,16 @@ CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {<br>
return createCallHelper(TheFn, Ops, this);<br>
}<br>
<br>
-CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {<br>
+CallInst *<br>
+IRBuilderBase::CreateAssumption(Value *Cond,<br>
+ ArrayRef<OperandBundleDef> OpBundles) {<br>
assert(Cond->getType() == getInt1Ty() &&<br>
"an assumption condition must be of type i1");<br>
<br>
Value *Ops[] = { Cond };<br>
Module *M = BB->getParent()->getParent();<br>
Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);<br>
- return createCallHelper(FnAssume, Ops, this);<br>
+ return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);<br>
}<br>
<br>
/// Create a call to a Masked Load intrinsic.<br>
@@ -1107,63 +1110,37 @@ Value *IRBuilderBase::CreatePreserveStructAccessIndex(<br>
return Fn;<br>
}<br>
<br>
-CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(<br>
- const DataLayout &DL, Value *PtrValue, Value *Mask, Type *IntPtrTy,<br>
- Value *OffsetValue, Value **TheCheck) {<br>
- Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");<br>
-<br>
- if (OffsetValue) {<br>
- bool IsOffsetZero = false;<br>
- if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))<br>
- IsOffsetZero = CI->isZero();<br>
-<br>
- if (!IsOffsetZero) {<br>
- if (OffsetValue->getType() != IntPtrTy)<br>
- OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,<br>
- "offsetcast");<br>
- PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");<br>
- }<br>
- }<br>
-<br>
- Value *Zero = ConstantInt::get(IntPtrTy, 0);<br>
- Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");<br>
- Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");<br>
- if (TheCheck)<br>
- *TheCheck = InvCond;<br>
-<br>
- return CreateAssumption(InvCond);<br>
+CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,<br>
+ Value *PtrValue,<br>
+ Value *AlignValue,<br>
+ Value *OffsetValue) {<br>
+ SmallVector<Value *, 4> Vals({PtrValue, AlignValue});<br>
+ if (OffsetValue)<br>
+ Vals.push_back(OffsetValue);<br>
+ OperandBundleDefT<Value *> AlignOpB("align", Vals);<br>
+ return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});<br>
}<br>
<br>
-CallInst *IRBuilderBase::CreateAlignmentAssumption(<br>
- const DataLayout &DL, Value *PtrValue, unsigned Alignment,<br>
- Value *OffsetValue, Value **TheCheck) {<br>
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,<br>
+ Value *PtrValue,<br>
+ unsigned Alignment,<br>
+ Value *OffsetValue) {<br>
assert(isa<PointerType>(PtrValue->getType()) &&<br>
"trying to create an alignment assumption on a non-pointer?");<br>
assert(Alignment != 0 && "Invalid Alignment");<br>
auto *PtrTy = cast<PointerType>(PtrValue->getType());<br>
Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());<br>
-<br>
- Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);<br>
- return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,<br>
- OffsetValue, TheCheck);<br>
+ Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);<br>
+ return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);<br>
}<br>
<br>
-CallInst *IRBuilderBase::CreateAlignmentAssumption(<br>
- const DataLayout &DL, Value *PtrValue, Value *Alignment,<br>
- Value *OffsetValue, Value **TheCheck) {<br>
+CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,<br>
+ Value *PtrValue,<br>
+ Value *Alignment,<br>
+ Value *OffsetValue) {<br>
assert(isa<PointerType>(PtrValue->getType()) &&<br>
"trying to create an alignment assumption on a non-pointer?");<br>
- auto *PtrTy = cast<PointerType>(PtrValue->getType());<br>
- Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());<br>
-<br>
- if (Alignment->getType() != IntPtrTy)<br>
- Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,<br>
- "alignmentcast");<br>
-<br>
- Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");<br>
-<br>
- return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,<br>
- OffsetValue, TheCheck);<br>
+ return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);<br>
}<br>
<br>
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}<br>
<br>
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp<br>
index 8fa87b748901..3c8e73a03cc5 100644<br>
--- a/llvm/lib/IR/Verifier.cpp<br>
+++ b/llvm/lib/IR/Verifier.cpp<br>
@@ -4449,21 +4449,32 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {<br>
Assert(Elem.Tag->getKey() == "ignore" ||<br>
Attribute::isExistingAttribute(Elem.Tag->getKey()),<br>
"tags must be valid attribute names");<br>
- Assert(Elem.End - Elem.Begin <= 2, "to many arguments");<br>
Attribute::AttrKind Kind =<br>
Attribute::getAttrKindFromName(Elem.Tag->getKey());<br>
+ unsigned ArgCount = Elem.End - Elem.Begin;<br>
+ if (Kind == Attribute::Alignment) {<br>
+ Assert(ArgCount <= 3 && ArgCount >= 2,<br>
+ "alignment assumptions should have 2 or 3 arguments");<br>
+ Assert(Call.getOperand(Elem.Begin)->getType()->isPointerTy(),<br>
+ "first argument should be a pointer");<br>
+ Assert(Call.getOperand(Elem.Begin + 1)->getType()->isIntegerTy(),<br>
+ "second argument should be an integer");<br>
+ if (ArgCount == 3)<br>
+ Assert(Call.getOperand(Elem.Begin + 2)->getType()->isIntegerTy(),<br>
+ "third argument should be an integer if present");<br>
+ return;<br>
+ }<br>
+ Assert(ArgCount <= 2, "too many arguments");<br>
if (Kind == Attribute::None)<br>
break;<br>
if (Attribute::doesAttrKindHaveArgument(Kind)) {<br>
- Assert(Elem.End - Elem.Begin == 2,<br>
- "this attribute should have 2 arguments");<br>
+ Assert(ArgCount == 2, "this attribute should have 2 arguments");<br>
Assert(isa<ConstantInt>(Call.getOperand(Elem.Begin + 1)),<br>
"the second argument should be a constant integral value");<br>
} else if (isFuncOnlyAttr(Kind)) {<br>
- Assert((Elem.End - Elem.Begin) == 0, "this attribute has no argument");<br>
+ Assert((ArgCount) == 0, "this attribute has no argument");<br>
} else if (!isFuncOrArgAttr(Kind)) {<br>
- Assert((Elem.End - Elem.Begin) == 1,<br>
- "this attribute should have one argument");<br>
+ Assert((ArgCount) == 1, "this attribute should have one argument");<br>
}<br>
}<br>
break;<br>
<br>
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp<br>
index 836af6234ad5..c734c9a68fb2 100644<br>
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp<br>
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp<br>
@@ -4220,11 +4220,16 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {<br>
break;<br>
case Intrinsic::assume: {<br>
Value *IIOperand = II->getArgOperand(0);<br>
+ SmallVector<OperandBundleDef, 4> OpBundles;<br>
+ II->getOperandBundlesAsDefs(OpBundles);<br>
+ bool HasOpBundles = !OpBundles.empty();<br>
// Remove an assume if it is followed by an identical assume.<br>
// TODO: Do we need this? Unless there are conflicting assumptions, the<br>
// computeKnownBits(IIOperand) below here eliminates redundant assumes.<br>
Instruction *Next = II->getNextNonDebugInstruction();<br>
- if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))<br>
+ if (HasOpBundles &&<br>
+ match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))) &&<br>
+ !cast<IntrinsicInst>(Next)->hasOperandBundles())<br>
return eraseInstFromFunction(CI);<br>
<br>
// Canonicalize assume(a && b) -> assume(a); assume(b);<br>
@@ -4234,14 +4239,15 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {<br>
Value *AssumeIntrinsic = II->getCalledOperand();<br>
Value *A, *B;<br>
if (match(IIOperand, m_And(m_Value(A), m_Value(B)))) {<br>
- Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, II->getName());<br>
+ Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,<br>
+ II->getName());<br>
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());<br>
return eraseInstFromFunction(*II);<br>
}<br>
// assume(!(a || b)) -> assume(!a); assume(!b);<br>
if (match(IIOperand, m_Not(m_Or(m_Value(A), m_Value(B))))) {<br>
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,<br>
- Builder.CreateNot(A), II->getName());<br>
+ Builder.CreateNot(A), OpBundles, II->getName());<br>
Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,<br>
Builder.CreateNot(B), II->getName());<br>
return eraseInstFromFunction(*II);<br>
@@ -4257,7 +4263,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {<br>
isValidAssumeForContext(II, LHS, &DT)) {<br>
MDNode *MD = MDNode::get(II->getContext(), None);<br>
LHS->setMetadata(LLVMContext::MD_nonnull, MD);<br>
- return eraseInstFromFunction(*II);<br>
+ if (!HasOpBundles)<br>
+ return eraseInstFromFunction(*II);<br>
<br>
// TODO: apply nonnull return attributes to calls and invokes<br>
// TODO: apply range metadata for range check patterns?<br>
<br>
diff --git a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp<br>
index 5c008585869c..bccf94fc217f 100644<br>
--- a/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp<br>
+++ b/llvm/lib/Transforms/Scalar/AlignmentFromAssumptions.cpp<br>
@@ -15,6 +15,7 @@<br>
//<br>
//===----------------------------------------------------------------------===//<br>
<br>
+#include "llvm/IR/Instructions.h"<br>
#include "llvm/InitializePasses.h"<br>
#define AA_NAME "alignment-from-assumptions"<br>
#define DEBUG_TYPE AA_NAME<br>
@@ -203,103 +204,33 @@ static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,<br>
}<br>
<br>
bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,<br>
+ unsigned Idx,<br>
Value *&AAPtr,<br>
const SCEV *&AlignSCEV,<br>
const SCEV *&OffSCEV) {<br>
- // An alignment assume must be a statement about the least-significant<br>
- // bits of the pointer being zero, possibly with some offset.<br>
- ICmpInst *ICI = dyn_cast<ICmpInst>(I->getArgOperand(0));<br>
- if (!ICI)<br>
+ Type *Int64Ty = Type::getInt64Ty(I->getContext());<br>
+ OperandBundleUse AlignOB = I->getOperandBundleAt(Idx);<br>
+ if (AlignOB.getTagName() != "align")<br>
return false;<br>
-<br>
- // This must be an expression of the form: x & m == 0.<br>
- if (ICI->getPredicate() != ICmpInst::ICMP_EQ)<br>
- return false;<br>
-<br>
- // Swap things around so that the RHS is 0.<br>
- Value *CmpLHS = ICI->getOperand(0);<br>
- Value *CmpRHS = ICI->getOperand(1);<br>
- const SCEV *CmpLHSSCEV = SE->getSCEV(CmpLHS);<br>
- const SCEV *CmpRHSSCEV = SE->getSCEV(CmpRHS);<br>
- if (CmpLHSSCEV->isZero())<br>
- std::swap(CmpLHS, CmpRHS);<br>
- else if (!CmpRHSSCEV->isZero())<br>
- return false;<br>
-<br>
- BinaryOperator *CmpBO = dyn_cast<BinaryOperator>(CmpLHS);<br>
- if (!CmpBO || CmpBO->getOpcode() != Instruction::And)<br>
- return false;<br>
-<br>
- // Swap things around so that the right operand of the and is a constant<br>
- // (the mask); we cannot deal with variable masks.<br>
- Value *AndLHS = CmpBO->getOperand(0);<br>
- Value *AndRHS = CmpBO->getOperand(1);<br>
- const SCEV *AndLHSSCEV = SE->getSCEV(AndLHS);<br>
- const SCEV *AndRHSSCEV = SE->getSCEV(AndRHS);<br>
- if (isa<SCEVConstant>(AndLHSSCEV)) {<br>
- std::swap(AndLHS, AndRHS);<br>
- std::swap(AndLHSSCEV, AndRHSSCEV);<br>
- }<br>
-<br>
- const SCEVConstant *MaskSCEV = dyn_cast<SCEVConstant>(AndRHSSCEV);<br>
- if (!MaskSCEV)<br>
- return false;<br>
-<br>
- // The mask must have some trailing ones (otherwise the condition is<br>
- // trivial and tells us nothing about the alignment of the left operand).<br>
- unsigned TrailingOnes = MaskSCEV->getAPInt().countTrailingOnes();<br>
- if (!TrailingOnes)<br>
- return false;<br>
-<br>
- // Cap the alignment at the maximum with which LLVM can deal (and make sure<br>
- // we don't overflow the shift).<br>
- uint64_t Alignment;<br>
- TrailingOnes = std::min(TrailingOnes,<br>
- unsigned(sizeof(unsigned) * CHAR_BIT - 1));<br>
- Alignment = std::min(1u << TrailingOnes, +Value::MaximumAlignment);<br>
-<br>
- Type *Int64Ty = Type::getInt64Ty(I->getParent()->getParent()->getContext());<br>
- AlignSCEV = SE->getConstant(Int64Ty, Alignment);<br>
-<br>
- // The LHS might be a ptrtoint instruction, or it might be the pointer<br>
- // with an offset.<br>
- AAPtr = nullptr;<br>
- OffSCEV = nullptr;<br>
- if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(AndLHS)) {<br>
- AAPtr = PToI->getPointerOperand();<br>
+ assert(AlignOB.Inputs.size() >= 2);<br>
+ AAPtr = AlignOB.Inputs[0].get();<br>
+ // TODO: Consider accumulating the offset to the base.<br>
+ AAPtr = AAPtr->stripPointerCastsSameRepresentation();<br>
+ AlignSCEV = SE->getSCEV(AlignOB.Inputs[1].get());<br>
+ AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);<br>
+ if (AlignOB.Inputs.size() == 3)<br>
+ OffSCEV = SE->getSCEV(AlignOB.Inputs[2].get());<br>
+ else<br>
OffSCEV = SE->getZero(Int64Ty);<br>
- } else if (const SCEVAddExpr* AndLHSAddSCEV =<br>
- dyn_cast<SCEVAddExpr>(AndLHSSCEV)) {<br>
- // Try to find the ptrtoint; subtract it and the rest is the offset.<br>
- for (SCEVAddExpr::op_iterator J = AndLHSAddSCEV->op_begin(),<br>
- JE = AndLHSAddSCEV->op_end(); J != JE; ++J)<br>
- if (const SCEVUnknown *OpUnk = dyn_cast<SCEVUnknown>(*J))<br>
- if (PtrToIntInst *PToI = dyn_cast<PtrToIntInst>(OpUnk->getValue())) {<br>
- AAPtr = PToI->getPointerOperand();<br>
- OffSCEV = SE->getMinusSCEV(AndLHSAddSCEV, *J);<br>
- break;<br>
- }<br>
- }<br>
-<br>
- if (!AAPtr)<br>
- return false;<br>
-<br>
- // Sign extend the offset to 64 bits (so that it is like all of the other<br>
- // expressions).<br>
- unsigned OffSCEVBits = OffSCEV->getType()->getPrimitiveSizeInBits();<br>
- if (OffSCEVBits < 64)<br>
- OffSCEV = SE->getSignExtendExpr(OffSCEV, Int64Ty);<br>
- else if (OffSCEVBits > 64)<br>
- return false;<br>
-<br>
- AAPtr = AAPtr->stripPointerCasts();<br>
+ OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);<br>
return true;<br>
}<br>
<br>
-bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {<br>
+bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall,<br>
+ unsigned Idx) {<br>
Value *AAPtr;<br>
const SCEV *AlignSCEV, *OffSCEV;<br>
- if (!extractAlignmentInfo(ACall, AAPtr, AlignSCEV, OffSCEV))<br>
+ if (!extractAlignmentInfo(ACall, Idx, AAPtr, AlignSCEV, OffSCEV))<br>
return false;<br>
<br>
// Skip ConstantPointerNull and UndefValue. Assumptions on these shouldn't<br>
@@ -317,13 +248,14 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {<br>
continue;<br>
<br>
if (Instruction *K = dyn_cast<Instruction>(J))<br>
- if (isValidAssumeForContext(ACall, K, DT))<br>
WorkList.push_back(K);<br>
}<br>
<br>
while (!WorkList.empty()) {<br>
Instruction *J = WorkList.pop_back_val();<br>
if (LoadInst *LI = dyn_cast<LoadInst>(J)) {<br>
+ if (!isValidAssumeForContext(ACall, J, DT))<br>
+ continue;<br>
Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,<br>
LI->getPointerOperand(), SE);<br>
if (NewAlignment > LI->getAlign()) {<br>
@@ -331,6 +263,8 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {<br>
++NumLoadAlignChanged;<br>
}<br>
} else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {<br>
+ if (!isValidAssumeForContext(ACall, J, DT))<br>
+ continue;<br>
Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,<br>
SI->getPointerOperand(), SE);<br>
if (NewAlignment > SI->getAlign()) {<br>
@@ -338,6 +272,8 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {<br>
++NumStoreAlignChanged;<br>
}<br>
} else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {<br>
+ if (!isValidAssumeForContext(ACall, J, DT))<br>
+ continue;<br>
Align NewDestAlignment =<br>
getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);<br>
<br>
@@ -369,7 +305,7 @@ bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall) {<br>
Visited.insert(J);<br>
for (User *UJ : J->users()) {<br>
Instruction *K = cast<Instruction>(UJ);<br>
- if (!Visited.count(K) && isValidAssumeForContext(ACall, K, DT))<br>
+ if (!Visited.count(K))<br>
WorkList.push_back(K);<br>
}<br>
}<br>
@@ -396,8 +332,11 @@ bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,<br>
<br>
bool Changed = false;<br>
for (auto &AssumeVH : AC.assumptions())<br>
- if (AssumeVH)<br>
- Changed |= processAssumption(cast<CallInst>(AssumeVH));<br>
+ if (AssumeVH) {<br>
+ CallInst *Call = cast<CallInst>(AssumeVH);<br>
+ for (unsigned Idx = 0; Idx < Call->getNumOperandBundles(); Idx++)<br>
+ Changed |= processAssumption(Call, Idx);<br>
+ }<br>
<br>
return Changed;<br>
}<br>
<br>
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll<br>
index 14e764f042c7..610fd448c3b9 100644<br>
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll<br>
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple.ll<br>
@@ -4,10 +4,7 @@ target datalayout = "e-i64:64-f80:128-n8:16:32:64-S128"<br>
<br>
define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32)]<br>
%0 = load i32, i32* %a, align 4<br>
ret i32 %0<br>
<br>
@@ -18,11 +15,7 @@ entry:<br>
<br>
define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %offsetptr = add i64 %ptrint, 24<br>
- %maskedptr = and i64 %offsetptr, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 24)]<br>
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2<br>
%0 = load i32, i32* %arrayidx, align 4<br>
ret i32 %0<br>
@@ -34,11 +27,7 @@ entry:<br>
<br>
define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %offsetptr = add i64 %ptrint, 28<br>
- %maskedptr = and i64 %offsetptr, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 28)]<br>
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1<br>
%0 = load i32, i32* %arrayidx, align 4<br>
ret i32 %0<br>
@@ -50,10 +39,7 @@ entry:<br>
<br>
define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i32 32, i32 0)]<br>
%0 = load i32, i32* %a, align 4<br>
ret i32 %0<br>
<br>
@@ -64,10 +50,7 @@ entry:<br>
<br>
define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i32 0)]<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
@@ -98,10 +81,7 @@ for.end: ; preds = %for.body<br>
; load(a, i0+i1+i2+32)<br>
define void @hoo2(i32* nocapture %a, i64 %id, i64 %num) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i64 0)]<br>
%id.mul = shl nsw i64 %id, 6<br>
%num.mul = shl nsw i64 %num, 6<br>
br label %for0.body<br>
@@ -147,10 +127,7 @@ return:<br>
<br>
define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
@@ -175,16 +152,13 @@ for.end: ; preds = %for.body<br>
<br>
define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]<br>
%r.06 = phi i32 [ 0, %entry ], [ %add, %for.body ]<br>
%arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i8 32, i8 0)]<br>
%0 = load i32, i32* %arrayidx, align 4<br>
%add = add nsw i32 %0, %r.06<br>
%indvars.iv.next = add i64 %indvars.iv, 4<br>
@@ -203,10 +177,7 @@ for.end: ; preds = %for.body<br>
<br>
define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i128 32, i128 0)]<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
@@ -231,10 +202,7 @@ for.end: ; preds = %for.body<br>
<br>
define i32 @moo(i32* nocapture %a) nounwind uwtable {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %a, i16 32)]<br>
%0 = bitcast i32* %a to i8*<br>
tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)<br>
ret i32 undef<br>
@@ -246,15 +214,9 @@ entry:<br>
<br>
define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
- %ptrint1 = ptrtoint i32* %b to i64<br>
- %maskedptr3 = and i64 %ptrint1, 127<br>
- %maskcond4 = icmp eq i64 %maskedptr3, 0<br>
- tail call void @llvm.assume(i1 %maskcond4)<br>
+ tail call void @llvm.assume(i1 true) ["align"(i32* %b, i32 128)]<br>
%0 = bitcast i32* %a to i8*<br>
+ tail call void @llvm.assume(i1 true) ["align"(i8* %0, i16 32)]<br>
%1 = bitcast i32* %b to i8*<br>
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)<br>
ret i32 undef<br>
@@ -264,6 +226,19 @@ entry:<br>
; CHECK: ret i32 undef<br>
}<br>
<br>
+define i32 @moo3(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {<br>
+entry:<br>
+ %0 = bitcast i32* %a to i8*<br>
+ tail call void @llvm.assume(i1 true) ["align"(i8* %0, i16 32), "align"(i32* %b, i32 128)]<br>
+ %1 = bitcast i32* %b to i8*<br>
+ tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)<br>
+ ret i32 undef<br>
+<br>
+; CHECK-LABEL: @moo3<br>
+; CHECK: @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 %0, i8* align 128 %1, i64 64, i1 false)<br>
+; CHECK: ret i32 undef<br>
+}<br>
+<br>
declare void @llvm.assume(i1) nounwind<br>
<br>
declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind<br>
<br>
diff --git a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll<br>
index 3f0819e3641b..453899c15c4f 100644<br>
--- a/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll<br>
+++ b/llvm/test/Transforms/AlignmentFromAssumptions/simple32.ll<br>
@@ -7,18 +7,12 @@ define i32 @foo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@foo<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32<br>
; CHECK-NEXT: ret i32 [[TMP0]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
%0 = load i32, i32* %a, align 4<br>
ret i32 %0<br>
<br>
@@ -28,21 +22,13 @@ define i32 @foo2(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@foo2<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 24<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32, i64 24) ]<br>
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 2<br>
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 16<br>
; CHECK-NEXT: ret i32 [[TMP0]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %offsetptr = add i64 %ptrint, 24<br>
- %maskedptr = and i64 %offsetptr, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i64 24)]<br>
%arrayidx = getelementptr inbounds i32, i32* %a, i64 2<br>
%0 = load i32, i32* %arrayidx, align 4<br>
ret i32 %0<br>
@@ -53,21 +39,13 @@ define i32 @foo2a(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@foo2a<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[OFFSETPTR:%.*]] = add i64 [[PTRINT]], 28<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[OFFSETPTR]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32, i64 28) ]<br>
; CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 -1<br>
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[ARRAYIDX]], align 32<br>
; CHECK-NEXT: ret i32 [[TMP0]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %offsetptr = add i64 %ptrint, 28<br>
- %maskedptr = and i64 %offsetptr, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32, i64 28)]<br>
%arrayidx = getelementptr inbounds i32, i32* %a, i64 -1<br>
%0 = load i32, i32* %arrayidx, align 4<br>
ret i32 %0<br>
@@ -78,18 +56,12 @@ define i32 @goo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@goo<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
; CHECK-NEXT: [[TMP0:%.*]] = load i32, i32* [[A]], align 32<br>
; CHECK-NEXT: ret i32 [[TMP0]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
%0 = load i32, i32* %a, align 4<br>
ret i32 %0<br>
<br>
@@ -99,10 +71,7 @@ define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@hoo<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
; CHECK-NEXT: br label [[FOR_BODY:%.*]]<br>
; CHECK: for.body:<br>
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]<br>
@@ -119,10 +88,7 @@ define i32 @hoo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
@@ -146,10 +112,7 @@ define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@joo<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
; CHECK-NEXT: br label [[FOR_BODY:%.*]]<br>
; CHECK: for.body:<br>
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]<br>
@@ -166,10 +129,7 @@ define i32 @joo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
@@ -193,10 +153,7 @@ define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@koo<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
; CHECK-NEXT: br label [[FOR_BODY:%.*]]<br>
; CHECK: for.body:<br>
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]<br>
@@ -213,10 +170,7 @@ define i32 @koo(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
@@ -240,10 +194,7 @@ define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-LABEL: define {{[^@]+}}@koo2<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
; CHECK-NEXT: br label [[FOR_BODY:%.*]]<br>
; CHECK: for.body:<br>
; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ -4, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]<br>
@@ -260,10 +211,7 @@ define i32 @koo2(i32* nocapture %a) nounwind uwtable readonly {<br>
; CHECK-NEXT: ret i32 [[ADD_LCSSA]]<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
br label %for.body<br>
<br>
for.body: ; preds = %entry, %for.body<br>
@@ -287,19 +235,13 @@ define i32 @moo(i32* nocapture %a) nounwind uwtable {<br>
; CHECK-LABEL: define {{[^@]+}}@moo<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]]) #1<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8*<br>
; CHECK-NEXT: tail call void @llvm.memset.p0i8.i64(i8* align 32 [[TMP0]], i8 0, i64 64, i1 false)<br>
; CHECK-NEXT: ret i32 undef<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
%0 = bitcast i32* %a to i8*<br>
tail call void @llvm.memset.p0i8.i64(i8* align 4 %0, i8 0, i64 64, i1 false)<br>
ret i32 undef<br>
@@ -310,28 +252,16 @@ define i32 @moo2(i32* nocapture %a, i32* nocapture %b) nounwind uwtable {<br>
; CHECK-LABEL: define {{[^@]+}}@moo2<br>
; CHECK-SAME: (i32* nocapture [[A:%.*]], i32* nocapture [[B:%.*]]) #1<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint i32* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
-; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint i32* [[B]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR3:%.*]] = and i64 [[PTRINT1]], 127<br>
-; CHECK-NEXT: [[MASKCOND4:%.*]] = icmp eq i64 [[MASKEDPTR3]], 0<br>
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[MASKCOND4]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64 32) ]<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(i32* [[B]], i64 128) ]<br>
; CHECK-NEXT: [[TMP0:%.*]] = bitcast i32* [[A]] to i8*<br>
; CHECK-NEXT: [[TMP1:%.*]] = bitcast i32* [[B]] to i8*<br>
; CHECK-NEXT: tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 32 [[TMP0]], i8* align 128 [[TMP1]], i64 64, i1 false)<br>
; CHECK-NEXT: ret i32 undef<br>
;<br>
entry:<br>
- %ptrint = ptrtoint i32* %a to i64<br>
- %maskedptr = and i64 %ptrint, 31<br>
- %maskcond = icmp eq i64 %maskedptr, 0<br>
- tail call void @llvm.assume(i1 %maskcond)<br>
- %ptrint1 = ptrtoint i32* %b to i64<br>
- %maskedptr3 = and i64 %ptrint1, 127<br>
- %maskcond4 = icmp eq i64 %maskedptr3, 0<br>
- tail call void @llvm.assume(i1 %maskcond4)<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %a, i64 32)]<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %b, i64 128)]<br>
%0 = bitcast i32* %a to i8*<br>
%1 = bitcast i32* %b to i8*<br>
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 %0, i8* align 4 %1, i64 64, i1 false)<br>
<br>
diff --git a/llvm/test/Transforms/Inline/align.ll b/llvm/test/Transforms/Inline/align.ll<br>
index ede6c3fa7bcf..f3a518456485 100644<br>
--- a/llvm/test/Transforms/Inline/align.ll<br>
+++ b/llvm/test/Transforms/Inline/align.ll<br>
@@ -23,10 +23,7 @@ define void @foo(float* nocapture %a, float* nocapture readonly %c) #0 {<br>
; CHECK-LABEL: define {{[^@]+}}@foo<br>
; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture readonly [[C:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]<br>
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C]], align 4<br>
; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5<br>
; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4<br>
@@ -87,14 +84,8 @@ define void @foo2(float* nocapture %a, float* nocapture %b, float* nocapture rea<br>
; CHECK-LABEL: define {{[^@]+}}@foo2<br>
; CHECK-SAME: (float* nocapture [[A:%.*]], float* nocapture [[B:%.*]], float* nocapture readonly [[C:%.*]]) #0<br>
; CHECK-NEXT: entry:<br>
-; CHECK-NEXT: [[PTRINT:%.*]] = ptrtoint float* [[A]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 127<br>
-; CHECK-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
-; CHECK-NEXT: [[PTRINT1:%.*]] = ptrtoint float* [[B]] to i64<br>
-; CHECK-NEXT: [[MASKEDPTR2:%.*]] = and i64 [[PTRINT1]], 127<br>
-; CHECK-NEXT: [[MASKCOND3:%.*]] = icmp eq i64 [[MASKEDPTR2]], 0<br>
-; CHECK-NEXT: call void @llvm.assume(i1 [[MASKCOND3]])<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[A]], i64 128) ]<br>
+; CHECK-NEXT: call void @llvm.assume(i1 true) [ "align"(float* [[B]], i64 128) ]<br>
; CHECK-NEXT: [[TMP0:%.*]] = load float, float* [[C]], align 4<br>
; CHECK-NEXT: [[ARRAYIDX_I:%.*]] = getelementptr inbounds float, float* [[A]], i64 5<br>
; CHECK-NEXT: store float [[TMP0]], float* [[ARRAYIDX_I]], align 4<br>
<br>
diff --git a/llvm/test/Transforms/InstCombine/assume.ll b/llvm/test/Transforms/InstCombine/assume.ll<br>
index 6f33e83ee336..b372f52a2cdf 100644<br>
--- a/llvm/test/Transforms/InstCombine/assume.ll<br>
+++ b/llvm/test/Transforms/InstCombine/assume.ll<br>
@@ -377,6 +377,7 @@ define i32 @assumption_conflicts_with_known_bits(i32 %a, i32 %b) {<br>
define void @debug_interference(i8 %x) {<br>
; CHECK-LABEL: @debug_interference(<br>
; CHECK-NEXT: [[CMP2:%.*]] = icmp ne i8 [[X:%.*]], 0<br>
+; CHECK-NEXT: tail call void @llvm.assume(i1 false)<br>
; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, metadata !7, metadata !DIExpression()), !dbg !9<br>
; CHECK-NEXT: tail call void @llvm.assume(i1 false)<br>
; CHECK-NEXT: tail call void @llvm.dbg.value(metadata i32 5, metadata !7, metadata !DIExpression()), !dbg !9<br>
<br>
diff --git a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll<br>
index 61287e35005f..2605701d231d 100644<br>
--- a/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll<br>
+++ b/llvm/test/Transforms/PhaseOrdering/inlining-alignment-assumptions.ll<br>
@@ -41,10 +41,7 @@ define void @caller1(i1 %c, i64* align 1 %ptr) {<br>
; ASSUMPTIONS-ON-NEXT: br i1 [[C:%.*]], label [[TRUE2_CRITEDGE:%.*]], label [[FALSE1:%.*]]<br>
; ASSUMPTIONS-ON: false1:<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 1, i64* [[PTR:%.*]], align 8<br>
-; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[PTR]] to i64<br>
-; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7<br>
-; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND]])<br>
+; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8<br>
@@ -54,10 +51,7 @@ define void @caller1(i1 %c, i64* align 1 %ptr) {<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 3, i64* [[PTR]], align 8<br>
; ASSUMPTIONS-ON-NEXT: ret void<br>
; ASSUMPTIONS-ON: true2.critedge:<br>
-; ASSUMPTIONS-ON-NEXT: [[PTRINT_C:%.*]] = ptrtoint i64* [[PTR]] to i64<br>
-; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR_C:%.*]] = and i64 [[PTRINT_C]], 7<br>
-; ASSUMPTIONS-ON-NEXT: [[MASKCOND_C:%.*]] = icmp eq i64 [[MASKEDPTR_C]], 0<br>
-; ASSUMPTIONS-ON-NEXT: tail call void @llvm.assume(i1 [[MASKCOND_C]])<br>
+; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 true) [ "align"(i64* [[PTR]], i64 8) ]<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 0, i64* [[PTR]], align 8<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8<br>
; ASSUMPTIONS-ON-NEXT: store volatile i64 -1, i64* [[PTR]], align 8<br>
@@ -94,26 +88,17 @@ false2:<br>
; This test checks that alignment assumptions do not prevent SROA.<br>
; See PR45763.<br>
<br>
-define internal void @callee2(i64* noalias sret align 8 %arg) {<br>
+define internal void @callee2(i64* noalias sret align 32 %arg) {<br>
store i64 0, i64* %arg, align 8<br>
ret void<br>
}<br>
<br>
define amdgpu_kernel void @caller2() {<br>
-; ASSUMPTIONS-OFF-LABEL: @caller2(<br>
-; ASSUMPTIONS-OFF-NEXT: ret void<br>
-;<br>
-; ASSUMPTIONS-ON-LABEL: @caller2(<br>
-; ASSUMPTIONS-ON-NEXT: [[ALLOCA:%.*]] = alloca i64, align 8, addrspace(5)<br>
-; ASSUMPTIONS-ON-NEXT: [[CAST:%.*]] = addrspacecast i64 addrspace(5)* [[ALLOCA]] to i64*<br>
-; ASSUMPTIONS-ON-NEXT: [[PTRINT:%.*]] = ptrtoint i64* [[CAST]] to i64<br>
-; ASSUMPTIONS-ON-NEXT: [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 7<br>
-; ASSUMPTIONS-ON-NEXT: [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0<br>
-; ASSUMPTIONS-ON-NEXT: call void @llvm.assume(i1 [[MASKCOND]])<br>
-; ASSUMPTIONS-ON-NEXT: ret void<br>
+; CHECK-LABEL: @caller2(<br>
+; CHECK-NEXT: ret void<br>
;<br>
%alloca = alloca i64, align 8, addrspace(5)<br>
%cast = addrspacecast i64 addrspace(5)* %alloca to i64*<br>
- call void @callee2(i64* sret align 8 %cast)<br>
+ call void @callee2(i64* sret align 32 %cast)<br>
ret void<br>
}<br>
<br>
diff --git a/llvm/test/Verifier/assume-bundles.ll b/llvm/test/Verifier/assume-bundles.ll<br>
index 302421715c79..6e260f25129e 100644<br>
--- a/llvm/test/Verifier/assume-bundles.ll<br>
+++ b/llvm/test/Verifier/assume-bundles.ll<br>
@@ -1,3 +1,4 @@<br>
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py<br>
; RUN: not opt -verify < %s 2>&1 | FileCheck %s<br>
<br>
declare void @llvm.assume(i1)<br>
@@ -6,14 +7,21 @@ define void @func(i32* %P, i32 %P1, i32* %P2, i32* %P3) {<br>
; CHECK: tags must be valid attribute names<br>
call void @llvm.assume(i1 true) ["adazdazd"()]<br>
; CHECK: the second argument should be a constant integral value<br>
- call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1)]<br>
+ call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 %P1)]<br>
; CHECK: to many arguments<br>
- call void @llvm.assume(i1 true) ["align"(i32* %P, i32 8, i32 8)]<br>
+ call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 8, i32 8)]<br>
; CHECK: this attribute should have 2 arguments<br>
- call void @llvm.assume(i1 true) ["align"(i32* %P)]<br>
+ call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P)]<br>
; CHECK: this attribute has no argument<br>
- call void @llvm.assume(i1 true) ["align"(i32* %P, i32 4), "cold"(i32* %P)]<br>
+ call void @llvm.assume(i1 true) ["dereferenceable"(i32* %P, i32 4), "cold"(i32* %P)]<br>
; CHECK: this attribute should have one argument<br>
call void @llvm.assume(i1 true) ["noalias"()]<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4)]<br>
+; CHECK: alignment assumptions should have 2 or 3 arguments<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32 4, i32 4)]<br>
+; CHECK: second argument should be an integer<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %P, i32* %P2)]<br>
+; CHECK: third argument should be an integer if present<br>
+ call void @llvm.assume(i1 true) ["align"(i32* %P, i32 %P1, i32* %P2)]<br>
ret void<br>
}<br>
<br>
diff --git a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp<br>
index d35a77fa379b..946368e1cb94 100644<br>
--- a/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp<br>
+++ b/llvm/unittests/Analysis/AssumeBundleQueriesTest.cpp<br>
@@ -546,3 +546,41 @@ TEST(AssumeQueryAPI, AssumptionCache) {<br>
ASSERT_EQ(AR[0].Index, 1u);<br>
ASSERT_EQ(AR[0].Assume, &*First);<br>
}<br>
+<br>
+TEST(AssumeQueryAPI, Alignment) {<br>
+ LLVMContext C;<br>
+ SMDiagnostic Err;<br>
+ std::unique_ptr<Module> Mod = parseAssemblyString(<br>
+ "declare void @llvm.assume(i1)\n"<br>
+ "define void @test(i32* %P, i32* %P1, i32* %P2, i32 %I3, i1 %B) {\n"<br>
+ "call void @llvm.assume(i1 true) [\"align\"(i32* %P, i32 8, i32 %I3)]\n"<br>
+ "call void @llvm.assume(i1 true) [\"align\"(i32* %P1, i32 %I3, i32 "<br>
+ "%I3)]\n"<br>
+ "call void @llvm.assume(i1 true) [\"align\"(i32* %P2, i32 16, i32 8)]\n"<br>
+ "ret void\n}\n",<br>
+ Err, C);<br>
+ if (!Mod)<br>
+ Err.print("AssumeQueryAPI", errs());<br>
+<br>
+ Function *F = Mod->getFunction("test");<br>
+ BasicBlock::iterator Start = F->begin()->begin();<br>
+ IntrinsicInst *II;<br>
+ RetainedKnowledge RK;<br>
+ II = cast<IntrinsicInst>(&*Start);<br>
+ RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);<br>
+ ASSERT_EQ(RK.AttrKind, Attribute::Alignment);<br>
+ ASSERT_EQ(RK.WasOn, F->getArg(0));<br>
+ ASSERT_EQ(RK.ArgValue, 1u);<br>
+ Start++;<br>
+ II = cast<IntrinsicInst>(&*Start);<br>
+ RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);<br>
+ ASSERT_EQ(RK.AttrKind, Attribute::Alignment);<br>
+ ASSERT_EQ(RK.WasOn, F->getArg(1));<br>
+ ASSERT_EQ(RK.ArgValue, 1u);<br>
+ Start++;<br>
+ II = cast<IntrinsicInst>(&*Start);<br>
+ RK = getKnowledgeFromBundle(*II, II->bundle_op_info_begin()[0]);<br>
+ ASSERT_EQ(RK.AttrKind, Attribute::Alignment);<br>
+ ASSERT_EQ(RK.WasOn, F->getArg(2));<br>
+ ASSERT_EQ(RK.ArgValue, 8u);<br>
+}<br>
<br>
<br>
<br>
_______________________________________________<br>
llvm-commits mailing list<br>
<a href="mailto:llvm-commits@lists.llvm.org" target="_blank">llvm-commits@lists.llvm.org</a><br>
<a href="https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits" rel="noreferrer" target="_blank">https://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits</a><br>
</blockquote></div>