[llvm] c726fff - [InstCombine][profcheck] Add unknown branch weights to selects created in InstCombineAndOrXor.cpp (#175269)
via llvm-commits
llvm-commits at lists.llvm.org
Sun Jan 11 13:23:32 PST 2026
Author: Alan Zhao
Date: 2026-01-11T13:23:27-08:00
New Revision: c726fff61cb3bd5a3a98c52bb63b99fcb3b25524
URL: https://github.com/llvm/llvm-project/commit/c726fff61cb3bd5a3a98c52bb63b99fcb3b25524
DIFF: https://github.com/llvm/llvm-project/commit/c726fff61cb3bd5a3a98c52bb63b99fcb3b25524.diff
LOG: [InstCombine][profcheck] Add unknown branch weights to selects created in InstCombineAndOrXor.cpp (#175269)
These select instructions are created from combinations of bitwise
operators, which carry no branch weight information.
Tracking issue: #147390
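For reference, a minimal sketch (not the in-tree implementation of
createSelectInstWithUnknownProfile, and the helper name below is
illustrative only) of how a new select can be tagged with the
"unknown" branch-weight marker that the updated tests check for
(!{!"unknown", !"instcombine"}), using only public IR APIs:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Metadata.h"

    using namespace llvm;

    // Sketch only: build a select and attach an explicitly-unknown !prof
    // node attributed to instcombine, matching the metadata shape seen in
    // the test CHECK lines in this patch.
    static SelectInst *createSelectWithUnknownProfileSketch(Value *Cond,
                                                            Value *TrueV,
                                                            Value *FalseV) {
      SelectInst *Sel = SelectInst::Create(Cond, TrueV, FalseV);
      LLVMContext &Ctx = Sel->getContext();
      Metadata *Ops[] = {MDString::get(Ctx, "unknown"),
                         MDString::get(Ctx, "instcombine")};
      Sel->setMetadata(LLVMContext::MD_prof, MDNode::get(Ctx, Ops));
      return Sel;
    }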
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
llvm/test/Transforms/InstCombine/and.ll
llvm/test/Transforms/InstCombine/binop-cast.ll
llvm/test/Transforms/InstCombine/conditional-negation.ll
llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
llvm/test/Transforms/InstCombine/xor-ashr.ll
llvm/utils/profcheck-xfail.txt
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
index 0d22bc1599bdf..b4961105c72c2 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp
@@ -1671,8 +1671,8 @@ Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
!Cond->getType()->isIntOrIntVectorTy(1) ||
!match(I.getOperand(0), m_c_Add(m_SExt(m_Specific(Cond)), m_Value(X))))
return nullptr;
- return SelectInst::Create(Cond, Builder.CreateNeg(X, X->getName() + ".neg"),
- X);
+ return createSelectInstWithUnknownProfile(
+ Cond, Builder.CreateNeg(X, X->getName() + ".neg"), X);
}
/// This a limited reassociation for a special case (see above) where we are
@@ -2467,7 +2467,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
m_c_And(m_Value(Neg, m_OneUse(m_Neg(m_And(m_Value(), m_One())))),
m_Value(Y)))) {
Value *Cmp = Builder.CreateIsNull(Neg);
- return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y);
+ return createSelectInstWithUnknownProfile(Cmp,
+ ConstantInt::getNullValue(Ty), Y);
}
// Canonicalize:
@@ -2627,8 +2628,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum));
- return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C),
- ConstantInt::getNullValue(Ty));
+ return createSelectInstWithUnknownProfile(Cmp, ConstantInt::get(Ty, *C),
+ ConstantInt::getNullValue(Ty));
}
Constant *C1, *C2;
@@ -2647,8 +2648,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
// ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
Constant *CmpC = ConstantExpr::getSub(LshrC, Log2C1);
Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
- return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
- ConstantInt::getNullValue(Ty));
+ return createSelectInstWithUnknownProfile(
+ Cmp, ConstantInt::get(Ty, *C3), ConstantInt::getNullValue(Ty));
}
}
@@ -2664,8 +2665,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
Constant *ShlC = ConstantExpr::getAdd(C2, Log2C1);
Constant *CmpC = ConstantExpr::getSub(ShlC, Log2C3);
Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
- return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
- ConstantInt::getNullValue(Ty));
+ return createSelectInstWithUnknownProfile(
+ Cmp, ConstantInt::get(Ty, *C3), ConstantInt::getNullValue(Ty));
}
}
}
@@ -2823,27 +2824,29 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
Value *A, *B;
if (match(&I, m_c_And(m_SExt(m_Value(A)), m_Value(B))) &&
A->getType()->isIntOrIntVectorTy(1))
- return SelectInst::Create(A, B, Constant::getNullValue(Ty));
+ return createSelectInstWithUnknownProfile(A, B, Constant::getNullValue(Ty));
// Similarly, a 'not' of the bool translates to a swap of the select arms:
// ~sext(A) & B / B & ~sext(A) --> A ? 0 : B
if (match(&I, m_c_And(m_Not(m_SExt(m_Value(A))), m_Value(B))) &&
A->getType()->isIntOrIntVectorTy(1))
- return SelectInst::Create(A, Constant::getNullValue(Ty), B);
+ return createSelectInstWithUnknownProfile(A, Constant::getNullValue(Ty), B);
// and(zext(A), B) -> A ? (B & 1) : 0
if (match(&I, m_c_And(m_OneUse(m_ZExt(m_Value(A))), m_Value(B))) &&
A->getType()->isIntOrIntVectorTy(1))
- return SelectInst::Create(A, Builder.CreateAnd(B, ConstantInt::get(Ty, 1)),
- Constant::getNullValue(Ty));
+ return createSelectInstWithUnknownProfile(
+ A, Builder.CreateAnd(B, ConstantInt::get(Ty, 1)),
+ Constant::getNullValue(Ty));
// (-1 + A) & B --> A ? 0 : B where A is 0/1.
if (match(&I, m_c_And(m_OneUse(m_Add(m_ZExtOrSelf(m_Value(A)), m_AllOnes())),
m_Value(B)))) {
if (A->getType()->isIntOrIntVectorTy(1))
- return SelectInst::Create(A, Constant::getNullValue(Ty), B);
+ return createSelectInstWithUnknownProfile(A, Constant::getNullValue(Ty),
+ B);
if (computeKnownBits(A, &I).countMaxActiveBits() <= 1) {
- return SelectInst::Create(
+ return createSelectInstWithUnknownProfile(
Builder.CreateICmpEQ(A, Constant::getNullValue(A->getType())), B,
Constant::getNullValue(Ty));
}
@@ -2855,7 +2858,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
m_Value(Y))) &&
*C == X->getType()->getScalarSizeInBits() - 1) {
Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
- return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
+ return createSelectInstWithUnknownProfile(IsNeg, Y,
+ ConstantInt::getNullValue(Ty));
}
// If there's a 'not' of the shifted value, swap the select operands:
// ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
@@ -2864,7 +2868,8 @@ Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
m_Value(Y))) &&
*C == X->getType()->getScalarSizeInBits() - 1) {
Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
- return SelectInst::Create(IsNeg, ConstantInt::getNullValue(Ty), Y);
+ return createSelectInstWithUnknownProfile(IsNeg,
+ ConstantInt::getNullValue(Ty), Y);
}
// (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
@@ -4325,7 +4330,8 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
// canonicalization?
if (match(&I, m_c_Or(m_OneUse(m_SExt(m_Value(A))), m_Value(B))) &&
A->getType()->isIntOrIntVectorTy(1))
- return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), B);
+ return createSelectInstWithUnknownProfile(
+ A, ConstantInt::getAllOnesValue(Ty), B);
// Note: If we've gotten to the point of visiting the outer OR, then the
// inner one couldn't be simplified. If it was a constant, then it won't
@@ -4374,7 +4380,7 @@ Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
m_Deferred(X)))) {
Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
- return SelectInst::Create(NewICmpInst, AllOnes, X);
+ return createSelectInstWithUnknownProfile(NewICmpInst, AllOnes, X);
}
}
@@ -5284,7 +5290,8 @@ Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
!match(C1, m_AllOnes())) {
assert(!C1->isZeroValue() && "Unexpected xor with 0");
Value *IsNotNeg = Builder.CreateIsNotNeg(X);
- return SelectInst::Create(IsNotNeg, Op1, Builder.CreateNot(Op1));
+ return createSelectInstWithUnknownProfile(IsNotNeg, Op1,
+ Builder.CreateNot(Op1));
}
}
diff --git a/llvm/test/Transforms/InstCombine/and.ll b/llvm/test/Transforms/InstCombine/and.ll
index f0fd0e262a795..6a25e53ddaba4 100644
--- a/llvm/test/Transforms/InstCombine/and.ll
+++ b/llvm/test/Transforms/InstCombine/and.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
declare void @use8(i8)
@@ -7,6 +7,9 @@ declare void @use32(i32)
; There should be no 'and' instructions left in any test.
+;.
+; CHECK: @g = external global i64
+;.
define i32 @test_with_1(i32 %x) {
; CHECK-LABEL: @test_with_1(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X:%.*]], 0
@@ -1766,10 +1769,10 @@ define i8 @lshr_bitwidth_mask(i8 %x, i8 %y) {
ret i8 %r
}
-define i16 @signbit_splat_mask(i8 %x, i16 %y) {
+define i16 @signbit_splat_mask(i8 %x, i16 %y) !prof !0 {
; CHECK-LABEL: @signbit_splat_mask(
; CHECK-NEXT: [[ISNEG:%.*]] = icmp slt i8 [[X:%.*]], 0
-; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNEG]], i16 [[Y:%.*]], i16 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNEG]], i16 [[Y:%.*]], i16 0, !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 7
@@ -1944,10 +1947,10 @@ define i8 @not_lshr_bitwidth_mask(i8 %x, i8 %y) {
ret i8 %r
}
-define i16 @invert_signbit_splat_mask(i8 %x, i16 %y) {
+define i16 @invert_signbit_splat_mask(i8 %x, i16 %y) !prof !0 {
; CHECK-LABEL: @invert_signbit_splat_mask(
; CHECK-NEXT: [[ISNOTNEG:%.*]] = icmp sgt i8 [[X:%.*]], -1
-; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNOTNEG]], i16 [[Y:%.*]], i16 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[ISNOTNEG]], i16 [[Y:%.*]], i16 0, !prof [[PROF1]]
; CHECK-NEXT: ret i16 [[R]]
;
%a = ashr i8 %x, 7
@@ -2058,10 +2061,10 @@ define i16 @not_invert_signbit_splat_mask2(i8 %x, i16 %y) {
; CTTZ(ShlC) < LShrC
-define i16 @shl_lshr_pow2_const_case1(i16 %x) {
+define i16 @shl_lshr_pow2_const_case1(i16 %x) !prof !0 {
; CHECK-LABEL: @shl_lshr_pow2_const_case1(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 7
-; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0, !prof [[PROF1]]
; CHECK-NEXT: ret i16 [[R]]
;
%shl = shl i16 4, %x
@@ -2159,10 +2162,10 @@ define <3 x i16> @shl_lshr_pow2_const_case1_poison3_vec(<3 x i16> %x) {
; LShrC < CTTZ(ShlC) < LShrC + CTTZ(AndC)
-define i16 @shl_lshr_pow2_const_case2(i16 %x) {
+define i16 @shl_lshr_pow2_const_case2(i16 %x) !prof !0 {
; CHECK-LABEL: @shl_lshr_pow2_const_case2(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 2
-; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0, !prof [[PROF1]]
; CHECK-NEXT: ret i16 [[R]]
;
%shl = shl i16 16, %x
@@ -2312,10 +2315,10 @@ define i16 @lshr_lshr_pow2_const_negative_overflow(i16 %x) {
; demanded bits path for lshr+shl+and
; Log2(LshrC) + ShlC < BitWidth
-define i16 @lshr_shl_pow2_const_case1(i16 %x) {
+define i16 @lshr_shl_pow2_const_case1(i16 %x) !prof !0 {
; CHECK-LABEL: @lshr_shl_pow2_const_case1(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 7
-; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 8, i16 0, !prof [[PROF1]]
; CHECK-NEXT: ret i16 [[R]]
;
%lshr1 = lshr i16 256, %x
@@ -2339,10 +2342,10 @@ define i16 @lshr_shl_pow2_const_xor(i16 %x) {
; Log2(LshrC) + ShlC >= BitWidth
-define i16 @lshr_shl_pow2_const_case2(i16 %x) {
+define i16 @lshr_shl_pow2_const_case2(i16 %x) !prof !0 {
; CHECK-LABEL: @lshr_shl_pow2_const_case2(
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i16 [[X:%.*]], 12
-; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 32, i16 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i16 32, i16 0, !prof [[PROF1]]
; CHECK-NEXT: ret i16 [[R]]
;
%lshr1 = lshr i16 8192, %x
@@ -2454,11 +2457,11 @@ define <3 x i16> @lshr_shl_pow2_const_case1_poison3_vec(<3 x i16> %x) {
ret <3 x i16> %r
}
-define i8 @negate_lowbitmask(i8 %x, i8 %y) {
+define i8 @negate_lowbitmask(i8 %x, i8 %y) !prof !0 {
; CHECK-LABEL: @negate_lowbitmask(
; CHECK-NEXT: [[A:%.*]] = and i8 [[X:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[A]], 0
-; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i8 0, i8 [[Y:%.*]]
+; CHECK-NEXT: [[R:%.*]] = select i1 [[TMP1]], i8 0, i8 [[Y:%.*]], !prof [[PROF1]]
; CHECK-NEXT: ret i8 [[R]]
;
%a = and i8 %x, 1
@@ -2527,10 +2530,10 @@ define i64 @test_and_or_constexpr_infloop() {
ret i64 %or
}
-define i32 @and_zext(i32 %a, i1 %b) {
+define i32 @and_zext(i32 %a, i1 %b) !prof !0 {
; CHECK-LABEL: @and_zext(
; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[A:%.*]], 1
-; CHECK-NEXT: [[R:%.*]] = select i1 [[B:%.*]], i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[R:%.*]] = select i1 [[B:%.*]], i32 [[TMP1]], i32 0, !prof [[PROF1]]
; CHECK-NEXT: ret i32 [[R]]
;
%mask = zext i1 %b to i32
@@ -2855,11 +2858,11 @@ define i32 @add_constant_equal_with_the_top_bit_of_demandedbits_insertpt(i32 %x,
ret i32 %and
}
-define i32 @and_sext_multiuse(i32 %x, i32 %y, i32 %a, i32 %b) {
+define i32 @and_sext_multiuse(i32 %x, i32 %y, i32 %a, i32 %b) !prof !0 {
; CHECK-LABEL: @and_sext_multiuse(
; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A:%.*]], [[B:%.*]]
-; CHECK-NEXT: [[ADD:%.*]] = select i1 [[CMP]], i32 [[TMP1]], i32 0
+; CHECK-NEXT: [[ADD:%.*]] = select i1 [[CMP]], i32 [[TMP1]], i32 0, !prof [[PROF1]]
; CHECK-NEXT: ret i32 [[ADD]]
;
%cmp = icmp sgt i32 %x, %y
@@ -2869,3 +2872,11 @@ define i32 @and_sext_multiuse(i32 %x, i32 %y, i32 %a, i32 %b) {
%add = add i32 %and1, %and2
ret i32 %add
}
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind }
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"unknown", !"instcombine"}
+;.
diff --git a/llvm/test/Transforms/InstCombine/binop-cast.ll b/llvm/test/Transforms/InstCombine/binop-cast.ll
index 7330d44098299..ae3036ffc88ae 100644
--- a/llvm/test/Transforms/InstCombine/binop-cast.ll
+++ b/llvm/test/Transforms/InstCombine/binop-cast.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
declare void @use(i32)
@@ -72,11 +72,11 @@ define i32 @and_sext_to_sel_multi_use_constant_mask(i1 %y) {
ret i32 %r
}
-define <2 x i32> @and_not_sext_to_sel(<2 x i32> %x, <2 x i1> %y) {
+define <2 x i32> @and_not_sext_to_sel(<2 x i32> %x, <2 x i1> %y) !prof !0 {
; CHECK-LABEL: @and_not_sext_to_sel(
; CHECK-NEXT: [[SEXT:%.*]] = sext <2 x i1> [[Y:%.*]] to <2 x i32>
; CHECK-NEXT: call void @use_vec(<2 x i32> [[SEXT]])
-; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[Y]], <2 x i32> zeroinitializer, <2 x i32> [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = select <2 x i1> [[Y]], <2 x i32> zeroinitializer, <2 x i32> [[X:%.*]], !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%sext = sext <2 x i1> %y to <2 x i32>
@@ -139,9 +139,9 @@ define i32 @and_not_zext_to_sel(i32 %x, i1 %y) {
ret i32 %r
}
-define i32 @or_sext_to_sel(i32 %x, i1 %y) {
+define i32 @or_sext_to_sel(i32 %x, i1 %y) !prof !0 {
; CHECK-LABEL: @or_sext_to_sel(
-; CHECK-NEXT: [[R:%.*]] = select i1 [[Y:%.*]], i32 -1, i32 [[X:%.*]]
+; CHECK-NEXT: [[R:%.*]] = select i1 [[Y:%.*]], i32 -1, i32 [[X:%.*]], !prof [[PROF1]]
; CHECK-NEXT: ret i32 [[R]]
;
%sext = sext i1 %y to i32
@@ -258,11 +258,11 @@ define i32 @xor_sext_to_sel_multi_use_constant_mask(i1 %y) {
ret i32 %r
}
-define i64 @PR63321(ptr %ptr, i64 %c) {
+define i64 @PR63321(ptr %ptr, i64 %c) !prof !0 {
; CHECK-LABEL: @PR63321(
-; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG0:![0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG2:![0-9]+]]
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i8 [[VAL]], 0
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[TMP1]], i64 [[C:%.*]], i64 0
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[TMP1]], i64 [[C:%.*]], i64 0, !prof [[PROF1]]
; CHECK-NEXT: ret i64 [[RES]]
;
%val = load i8, ptr %ptr, align 1, !range !{i8 0, i8 2}
@@ -275,7 +275,7 @@ define i64 @PR63321(ptr %ptr, i64 %c) {
; Negative test of PR63321
define i64 @and_add_non_bool(ptr %ptr, i64 %c) {
; CHECK-LABEL: @and_add_non_bool(
-; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG1:![0-9]+]]
+; CHECK-NEXT: [[VAL:%.*]] = load i8, ptr [[PTR:%.*]], align 1, !range [[RNG3:![0-9]+]]
; CHECK-NEXT: [[RHS:%.*]] = zext nneg i8 [[VAL]] to i64
; CHECK-NEXT: [[MASK:%.*]] = add nsw i64 [[RHS]], -1
; CHECK-NEXT: [[RES:%.*]] = and i64 [[MASK]], [[C:%.*]]
@@ -299,11 +299,11 @@ define i32 @and_add_bool_to_select(i1 %x, i32 %y) {
ret i32 %res
}
-define i32 @and_add_bool_no_fold(i32 %y) {
+define i32 @and_add_bool_no_fold(i32 %y) !prof !0 {
; CHECK-LABEL: @and_add_bool_no_fold(
; CHECK-NEXT: [[X:%.*]] = and i32 [[Y:%.*]], 1
; CHECK-NEXT: [[TMP1:%.*]] = icmp eq i32 [[X]], 0
-; CHECK-NEXT: [[RES:%.*]] = select i1 [[TMP1]], i32 [[Y]], i32 0
+; CHECK-NEXT: [[RES:%.*]] = select i1 [[TMP1]], i32 [[Y]], i32 0, !prof [[PROF1]]
; CHECK-NEXT: ret i32 [[RES]]
;
%x = and i32 %y, 1
@@ -336,3 +336,11 @@ define i32 @and_add_bool_to_select_multi_use(i1 %x, i32 %y) {
%ret = add i32 %res, %mask
ret i32 %ret
}
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"unknown", !"instcombine"}
+; CHECK: [[RNG2]] = !{i8 0, i8 2}
+; CHECK: [[RNG3]] = !{i8 0, i8 3}
+;.
diff --git a/llvm/test/Transforms/InstCombine/conditional-negation.ll b/llvm/test/Transforms/InstCombine/conditional-negation.ll
index 0ae1af8f8e67f..c11a9e8ab18bd 100644
--- a/llvm/test/Transforms/InstCombine/conditional-negation.ll
+++ b/llvm/test/Transforms/InstCombine/conditional-negation.ll
@@ -1,11 +1,11 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; Basic pattern
-define i8 @t0(i8 %x, i1 %cond) {
+define i8 @t0(i8 %x, i1 %cond) !prof !0 {
; CHECK-LABEL: @t0(
; CHECK-NEXT: [[X_NEG:%.*]] = sub i8 0, [[X:%.*]]
-; CHECK-NEXT: [[XOR:%.*]] = select i1 [[COND:%.*]], i8 [[X_NEG]], i8 [[X]]
+; CHECK-NEXT: [[XOR:%.*]] = select i1 [[COND:%.*]], i8 [[X_NEG]], i8 [[X]], !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret i8 [[XOR]]
;
%cond.splat = sext i1 %cond to i8
@@ -267,3 +267,9 @@ define i8 @extrause111_v2(i8 %x, i1 %cond) {
declare void @use.i8(i8)
declare i8 @gen.i8()
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"unknown", !"instcombine"}
+;.
diff --git a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
index 9774f6db55544..fffad1de08601 100644
--- a/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
+++ b/llvm/test/Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt -passes=instcombine %s -S -o - | FileCheck %s
; Clamp positive to allOnes:
@@ -191,12 +191,12 @@ define <4 x i32> @neg_or_ashr_i32_vec_commute(<4 x i32> %x0) {
; Extra uses
-define i32 @sub_ashr_or_i32_extra_use_sub(i32 %x, i32 %y, ptr %p) {
+define i32 @sub_ashr_or_i32_extra_use_sub(i32 %x, i32 %y, ptr %p) !prof !0 {
; CHECK-LABEL: @sub_ashr_or_i32_extra_use_sub(
; CHECK-NEXT: [[SUB:%.*]] = sub nsw i32 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT: store i32 [[SUB]], ptr [[P:%.*]], align 4
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i32 [[X]], [[Y]]
-; CHECK-NEXT: [[OR:%.*]] = select i1 [[TMP1]], i32 -1, i32 [[X]]
+; CHECK-NEXT: [[OR:%.*]] = select i1 [[TMP1]], i32 -1, i32 [[X]], !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret i32 [[OR]]
;
%sub = sub nsw i32 %y, %x
@@ -318,3 +318,11 @@ define i32 @sub_ashr_or_i32_shift_wrong_bit(i32 %x, i32 %y) {
%or = or i32 %shr, %x
ret i32 %or
}
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: attributes #[[ATTR0:[0-9]+]] = { nocallback nocreateundeforpoison nofree nosync nounwind speculatable willreturn memory(none) }
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"unknown", !"instcombine"}
+;.
diff --git a/llvm/test/Transforms/InstCombine/xor-ashr.ll b/llvm/test/Transforms/InstCombine/xor-ashr.ll
index f5ccdeef2f382..e6170647f02b0 100644
--- a/llvm/test/Transforms/InstCombine/xor-ashr.ll
+++ b/llvm/test/Transforms/InstCombine/xor-ashr.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-globals
; RUN: opt < %s -passes=instcombine -S | FileCheck %s
; RUN: opt < %s -passes=instcombine -use-constant-int-for-fixed-length-splat -S | FileCheck %s
@@ -7,10 +7,10 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f3
declare void @use16(i16)
declare void @use32(i32)
-define i8 @testi8i8(i8 %add) {
+define i8 @testi8i8(i8 %add) !prof !0 {
; CHECK-LABEL: @testi8i8(
; CHECK-NEXT: [[TMP1:%.*]] = icmp sgt i8 [[ADD:%.*]], -1
-; CHECK-NEXT: [[X:%.*]] = select i1 [[TMP1]], i8 127, i8 -128
+; CHECK-NEXT: [[X:%.*]] = select i1 [[TMP1]], i8 127, i8 -128, !prof [[PROF1:![0-9]+]]
; CHECK-NEXT: ret i8 [[X]]
;
%sh = ashr i8 %add, 7
@@ -153,3 +153,9 @@ define i16 @extrause_trunc2(i32 %add) {
%x = xor i16 %t, 127
ret i16 %x
}
+
+!0 = !{!"function_entry_count", i64 1000}
+;.
+; CHECK: [[META0:![0-9]+]] = !{!"function_entry_count", i64 1000}
+; CHECK: [[PROF1]] = !{!"unknown", !"instcombine"}
+;.
diff --git a/llvm/utils/profcheck-xfail.txt b/llvm/utils/profcheck-xfail.txt
index c98645457794c..d3463502cbd73 100644
--- a/llvm/utils/profcheck-xfail.txt
+++ b/llvm/utils/profcheck-xfail.txt
@@ -217,15 +217,12 @@ Transforms/IndVarSimplify/pr45835.ll
Transforms/IndVarSimplify/preserving-debugloc-rem-div.ll
Transforms/InstCombine/2004-09-20-BadLoadCombine.ll
Transforms/InstCombine/2005-04-07-UDivSelectCrash.ll
-Transforms/InstCombine/add-mask.ll
Transforms/InstCombine/add-shl-mul-umax.ll
Transforms/InstCombine/and2.ll
Transforms/InstCombine/and-fcmp.ll
-Transforms/InstCombine/and.ll
Transforms/InstCombine/and-or-icmps.ll
Transforms/InstCombine/apint-div1.ll
Transforms/InstCombine/apint-div2.ll
-Transforms/InstCombine/ashr-demand.ll
Transforms/InstCombine/atomic.ll
Transforms/InstCombine/binop-cast.ll
Transforms/InstCombine/binop-select-cast-of-select-cond.ll
@@ -238,9 +235,7 @@ Transforms/InstCombine/canonicalize-clamp-like-pattern-between-negative-and-posi
Transforms/InstCombine/canonicalize-clamp-like-pattern-between-zero-and-positive-threshold.ll
Transforms/InstCombine/cast-mul-select.ll
Transforms/InstCombine/clamp-to-minmax.ll
-Transforms/InstCombine/conditional-negation.ll
Transforms/InstCombine/cttz.ll
-Transforms/InstCombine/debuginfo-invert.ll
Transforms/InstCombine/demorgan.ll
Transforms/InstCombine/div.ll
Transforms/InstCombine/div-shift.ll
@@ -289,8 +284,6 @@ Transforms/InstCombine/mul.ll
Transforms/InstCombine/mul-masked-bits.ll
Transforms/InstCombine/mul-pow2.ll
Transforms/InstCombine/multiple-uses-load-bitcast-select.ll
-Transforms/InstCombine/narrow.ll
-Transforms/InstCombine/negated-bitmask.ll
Transforms/InstCombine/nested-select.ll
Transforms/InstCombine/not.ll
Transforms/InstCombine/or-bitmask.ll
@@ -321,8 +314,6 @@ Transforms/InstCombine/strchr-1.ll
Transforms/InstCombine/strchr-3.ll
Transforms/InstCombine/strlen-1.ll
Transforms/InstCombine/strrchr-3.ll
-Transforms/InstCombine/sub-ashr-and-to-icmp-select.ll
-Transforms/InstCombine/sub-ashr-or-to-icmp-select.ll
Transforms/InstCombine/sub-xor-cmp.ll
Transforms/InstCombine/truncating-saturate.ll
Transforms/InstCombine/unordered-fcmp-select.ll
@@ -332,9 +323,7 @@ Transforms/InstCombine/wcslen-3.ll
Transforms/InstCombine/X86/x86-avx512-inseltpoison.ll
Transforms/InstCombine/X86/x86-avx512.ll
Transforms/InstCombine/xor-and-or.ll
-Transforms/InstCombine/xor-ashr.ll
Transforms/InstCombine/zext-bool-add-sub.ll
-Transforms/InstCombine/zext-or-icmp.ll
Transforms/IROutliner/alloca-addrspace-1.ll
Transforms/IROutliner/alloca-addrspace.ll
Transforms/IROutliner/different-intrinsics.ll