[llvm-branch-commits] [llvm-branch] r368673 - Merging r368517, r368518, r368519, and r368554:
Hans Wennborg via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Aug 13 04:56:03 PDT 2019
Author: hans
Date: Tue Aug 13 04:56:03 2019
New Revision: 368673
URL: http://llvm.org/viewvc/llvm-project?rev=368673&view=rev
Log:
Merging r368517, r368518, r368519, and r368554:
------------------------------------------------------------------------
r368517 | lebedevri | 2019-08-10 21:28:12 +0200 (Sat, 10 Aug 2019) | 1 line
[NFC][InstCombine] Tests for shift amount reassociation in bittest with shift of const
------------------------------------------------------------------------
------------------------------------------------------------------------
r368518 | lebedevri | 2019-08-10 21:28:44 +0200 (Sat, 10 Aug 2019) | 5 lines
[InstCombine] Shift amount reassociation in bittest: drop pointless one-use restriction
That one-use restriction is not needed for correctness - we have already
ensured that one of the shifts will go away, so we know we won't increase
the instruction count. So there is no need for that restriction.
------------------------------------------------------------------------
------------------------------------------------------------------------
r368519 | lebedevri | 2019-08-10 21:28:54 +0200 (Sat, 10 Aug 2019) | 5 lines
[InstCombine] Shift amount reassociation in bittest: relax one-use check when shifting constant
If one of the values being shifted is a constant, since the new shift
amount is known-constant, the new shift will end up being constant-folded,
so we don't need that one-use restriction then.
------------------------------------------------------------------------
------------------------------------------------------------------------
r368554 | lebedevri | 2019-08-12 13:28:02 +0200 (Mon, 12 Aug 2019) | 6 lines
[InstCombine] foldShiftIntoShiftInAnotherHandOfAndInICmp(): avoid constantexpr pitfail (PR42962)
Instead of matching value and then blindly casting to BinaryOperator
just to get the opcode, just match instruction and do no cast.
Fixes https://bugs.llvm.org/show_bug.cgi?id=42962
------------------------------------------------------------------------
Modified:
llvm/branches/release_90/ (props changed)
llvm/branches/release_90/lib/Transforms/InstCombine/InstCombineCompares.cpp
llvm/branches/release_90/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll
Propchange: llvm/branches/release_90/
------------------------------------------------------------------------------
--- svn:mergeinfo (original)
+++ svn:mergeinfo Tue Aug 13 04:56:03 2019
@@ -1,3 +1,3 @@
/llvm/branches/Apple/Pertwee:110850,110961
/llvm/branches/type-system-rewrite:133420-134817
-/llvm/trunk:155241,366431,366481,366487,366527,366570,366660,366868,366925,367030,367062,367124,367215,367292,367304,367306,367314,367340-367341,367394,367396,367398,367403,367417,367662,367750,367753,367846-367847,367898,367941,368004,368230,368315,368324
+/llvm/trunk:155241,366431,366481,366487,366527,366570,366660,366868,366925,367030,367062,367124,367215,367292,367304,367306,367314,367340-367341,367394,367396,367398,367403,367417,367662,367750,367753,367846-367847,367898,367941,368004,368230,368315,368324,368517-368519,368554
Modified: llvm/branches/release_90/lib/Transforms/InstCombine/InstCombineCompares.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_90/lib/Transforms/InstCombine/InstCombineCompares.cpp?rev=368673&r1=368672&r2=368673&view=diff
==============================================================================
--- llvm/branches/release_90/lib/Transforms/InstCombine/InstCombineCompares.cpp (original)
+++ llvm/branches/release_90/lib/Transforms/InstCombine/InstCombineCompares.cpp Tue Aug 13 04:56:03 2019
@@ -3288,26 +3288,35 @@ foldShiftIntoShiftInAnotherHandOfAndInIC
// Look for an 'and' of two (opposite) logical shifts.
// Pick the single-use shift as XShift.
- Value *XShift, *YShift;
+ Instruction *XShift, *YShift;
if (!match(I.getOperand(0),
- m_c_And(m_OneUse(m_CombineAnd(m_AnyLogicalShift, m_Value(XShift))),
- m_CombineAnd(m_AnyLogicalShift, m_Value(YShift)))))
+ m_c_And(m_CombineAnd(m_AnyLogicalShift, m_Instruction(XShift)),
+ m_CombineAnd(m_AnyLogicalShift, m_Instruction(YShift)))))
return nullptr;
- // If YShift is a single-use 'lshr', swap the shifts around.
- if (match(YShift, m_OneUse(m_AnyLShr)))
+ // If YShift is a 'lshr', swap the shifts around.
+ if (match(YShift, m_AnyLShr))
std::swap(XShift, YShift);
// The shifts must be in opposite directions.
- Instruction::BinaryOps XShiftOpcode =
- cast<BinaryOperator>(XShift)->getOpcode();
- if (XShiftOpcode == cast<BinaryOperator>(YShift)->getOpcode())
+ auto XShiftOpcode = XShift->getOpcode();
+ if (XShiftOpcode == YShift->getOpcode())
return nullptr; // Do not care about same-direction shifts here.
Value *X, *XShAmt, *Y, *YShAmt;
match(XShift, m_BinOp(m_Value(X), m_Value(XShAmt)));
match(YShift, m_BinOp(m_Value(Y), m_Value(YShAmt)));
+ // If one of the values being shifted is a constant, then we will end with
+ // and+icmp, and shift instr will be constant-folded. If they are not,
+ // however, we will need to ensure that we won't increase instruction count.
+ if (!isa<Constant>(X) && !isa<Constant>(Y)) {
+ // At least one of the hands of the 'and' should be one-use shift.
+ if (!match(I.getOperand(0),
+ m_c_And(m_OneUse(m_AnyLogicalShift), m_Value())))
+ return nullptr;
+ }
+
// Can we fold (XShAmt+YShAmt) ?
Value *NewShAmt = SimplifyBinOp(Instruction::BinaryOps::Add, XShAmt, YShAmt,
SQ.getWithInstruction(&I));
Modified: llvm/branches/release_90/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/branches/release_90/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll?rev=368673&r1=368672&r2=368673&view=diff
==============================================================================
--- llvm/branches/release_90/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll (original)
+++ llvm/branches/release_90/test/Transforms/InstCombine/shift-amount-reassociation-in-bittest.ll Tue Aug 13 04:56:03 2019
@@ -279,8 +279,8 @@ define i1 @t18_const_oneuse0(i32 %x, i32
; CHECK-LABEL: @t18_const_oneuse0(
; CHECK-NEXT: [[T0:%.*]] = lshr i32 [[X:%.*]], 1
; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[Y:%.*]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[TMP3]]
;
@@ -521,15 +521,70 @@ define i1 @t31_var_oneuse6(i32 %x, i32 %
ret i1 %t3
}
+; Shift-of-const
+
+; Ok, non-truncated shift is of constant;
+define i1 @t32_shift_of_const_oneuse0(i32 %x, i32 %y, i32 %len) {
+; CHECK-LABEL: @t32_shift_of_const_oneuse0(
+; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 -52543054, [[T0]]
+; CHECK-NEXT: call void @use32(i32 [[T1]])
+; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
+; CHECK-NEXT: call void @use32(i32 [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl i32 [[Y:%.*]], [[T2]]
+; CHECK-NEXT: call void @use32(i32 [[T3]])
+; CHECK-NEXT: [[TMP1:%.*]] = and i32 [[Y]], 1
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
+; CHECK-NEXT: ret i1 [[TMP2]]
+;
+ %t0 = sub i32 32, %len
+ call void @use32(i32 %t0)
+ %t1 = lshr i32 4242424242, %t0 ; shift-of-constant
+ call void @use32(i32 %t1)
+ %t2 = add i32 %len, -1
+ call void @use32(i32 %t2)
+ %t3 = shl i32 %y, %t2
+ call void @use32(i32 %t3)
+ %t4 = and i32 %t1, %t3 ; no extra uses
+ %t5 = icmp ne i32 %t4, 0
+ ret i1 %t5
+}
+; Ok, truncated shift is of constant;
+define i1 @t33_shift_of_const_oneuse1(i32 %x, i32 %y, i32 %len) {
+; CHECK-LABEL: @t33_shift_of_const_oneuse1(
+; CHECK-NEXT: [[T0:%.*]] = sub i32 32, [[LEN:%.*]]
+; CHECK-NEXT: call void @use32(i32 [[T0]])
+; CHECK-NEXT: [[T1:%.*]] = lshr i32 [[X:%.*]], [[T0]]
+; CHECK-NEXT: call void @use32(i32 [[T1]])
+; CHECK-NEXT: [[T2:%.*]] = add i32 [[LEN]], -1
+; CHECK-NEXT: call void @use32(i32 [[T2]])
+; CHECK-NEXT: [[T3:%.*]] = shl i32 -52543054, [[T2]]
+; CHECK-NEXT: call void @use32(i32 [[T3]])
+; CHECK-NEXT: ret i1 false
+;
+ %t0 = sub i32 32, %len
+ call void @use32(i32 %t0)
+ %t1 = lshr i32 %x, %t0 ; shift-of-constant
+ call void @use32(i32 %t1)
+ %t2 = add i32 %len, -1
+ call void @use32(i32 %t2)
+ %t3 = shl i32 4242424242, %t2
+ call void @use32(i32 %t3)
+ %t4 = and i32 %t1, %t3 ; no extra uses
+ %t5 = icmp ne i32 %t4, 0
+ ret i1 %t5
+}
+
; Commutativity with extra uses
-define i1 @t32_commutativity0_oneuse0(i32 %x) {
-; CHECK-LABEL: @t32_commutativity0_oneuse0(
+define i1 @t34_commutativity0_oneuse0(i32 %x) {
+; CHECK-LABEL: @t34_commutativity0_oneuse0(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[T0:%.*]] = lshr i32 [[X:%.*]], 1
; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[Y]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[Y]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[TMP3]]
;
@@ -541,8 +596,8 @@ define i1 @t32_commutativity0_oneuse0(i3
%t3 = icmp ne i32 %t2, 0
ret i1 %t3
}
-define i1 @t33_commutativity0_oneuse1(i32 %x) {
-; CHECK-LABEL: @t33_commutativity0_oneuse1(
+define i1 @t35_commutativity0_oneuse1(i32 %x) {
+; CHECK-LABEL: @t35_commutativity0_oneuse1(
; CHECK-NEXT: [[Y:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[T1:%.*]] = shl i32 [[Y]], 1
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -560,13 +615,13 @@ define i1 @t33_commutativity0_oneuse1(i3
ret i1 %t3
}
-define i1 @t34_commutativity1_oneuse0(i32 %y) {
-; CHECK-LABEL: @t34_commutativity1_oneuse0(
+define i1 @t36_commutativity1_oneuse0(i32 %y) {
+; CHECK-LABEL: @t36_commutativity1_oneuse0(
; CHECK-NEXT: [[X:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[T0:%.*]] = lshr i32 [[X]], 1
; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[Y:%.*]], 2
-; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[X]]
+; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X]], 2
+; CHECK-NEXT: [[TMP2:%.*]] = and i32 [[TMP1]], [[Y:%.*]]
; CHECK-NEXT: [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT: ret i1 [[TMP3]]
;
@@ -578,8 +633,8 @@ define i1 @t34_commutativity1_oneuse0(i3
%t3 = icmp ne i32 %t2, 0
ret i1 %t3
}
-define i1 @t35_commutativity1_oneuse1(i32 %y) {
-; CHECK-LABEL: @t35_commutativity1_oneuse1(
+define i1 @t37_commutativity1_oneuse1(i32 %y) {
+; CHECK-LABEL: @t37_commutativity1_oneuse1(
; CHECK-NEXT: [[X:%.*]] = call i32 @gen32()
; CHECK-NEXT: [[T1:%.*]] = shl i32 [[Y:%.*]], 1
; CHECK-NEXT: call void @use32(i32 [[T1]])
@@ -598,8 +653,8 @@ define i1 @t35_commutativity1_oneuse1(i3
}
; Negative tests
-define <2 x i1> @n36_overshift(<2 x i32> %x, <2 x i32> %y) {
-; CHECK-LABEL: @n36_overshift(
+define <2 x i1> @n38_overshift(<2 x i32> %x, <2 x i32> %y) {
+; CHECK-LABEL: @n38_overshift(
; CHECK-NEXT: [[T0:%.*]] = lshr <2 x i32> [[X:%.*]], <i32 15, i32 1>
; CHECK-NEXT: [[T1:%.*]] = shl <2 x i32> [[Y:%.*]], <i32 17, i32 1>
; CHECK-NEXT: [[T2:%.*]] = and <2 x i32> [[T1]], [[T0]]
@@ -612,3 +667,15 @@ define <2 x i1> @n36_overshift(<2 x i32>
%t3 = icmp ne <2 x i32> %t2, <i32 0, i32 0>
ret <2 x i1> %t3
}
+
+; As usual, don't crash given constantexpr's :/
@f.a = internal global i16 0
+define i1 @constantexpr() {
+entry:
+ %0 = load i16, i16* @f.a
+ %shr = ashr i16 %0, 1
+ %shr1 = ashr i16 %shr, zext (i1 icmp ne (i16 ptrtoint (i16* @f.a to i16), i16 1) to i16)
+ %and = and i16 %shr1, 1
+ %tobool = icmp ne i16 %and, 0
+ ret i1 %tobool
+}
More information about the llvm-branch-commits
mailing list