[llvm] 2d031ec - [InstCombine] add one-use check to opposite shift folds
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 20 10:49:33 PST 2022
Author: Sanjay Patel
Date: 2022-01-20T13:49:23-05:00
New Revision: 2d031ec5e53f4e28ea5cc02b4cfdead98a9c0007
URL: https://github.com/llvm/llvm-project/commit/2d031ec5e53f4e28ea5cc02b4cfdead98a9c0007
DIFF: https://github.com/llvm/llvm-project/commit/2d031ec5e53f4e28ea5cc02b4cfdead98a9c0007.diff
LOG: [InstCombine] add one-use check to opposite shift folds
Test comments say this might be intentional, but I don't
see any hard evidence to support it. The extra instruction
shows up as a potential regression in D117680.
One test does show a missed fold that might be recovered
with better demanded bits analysis.
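To illustrate the one-use concern, here is a minimal IR sketch (hypothetical
function names; the shift amounts mirror the updated tests below). When the
shl feeds only the lshr, rewriting to a shifted-and-masked form replaces both
instructions, but when the shl has another use it must stay, so the old fold
added an instruction instead of removing one:

    declare void @use32(i32)

    define i32 @one_use(i32 %x) {
      %t0 = shl i32 %x, 10
      %r = lshr i32 %t0, 5       ; single use: still folded to (x << 5) & mask
      ret i32 %r
    }

    define i32 @multi_use(i32 %x) {
      %t0 = shl i32 %x, 10
      call void @use32(i32 %t0)  ; extra use keeps %t0 alive
      %r = lshr i32 %t0, 5       ; with the one-use check: kept as a shift of %t0
      ret i32 %r
    }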
Added:
Modified:
llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll
llvm/test/Transforms/InstCombine/shift.ll
Removed:
################################################################################
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
index 0ade25f76825..9acad19df9df 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineShifts.cpp
@@ -1032,12 +1032,13 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
NewLShr->setIsExact(I.isExact());
return NewLShr;
}
- // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
- Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
- APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
- return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
- }
- if (C1->ugt(ShAmtC)) {
+ if (Op0->hasOneUse()) {
+ // (X << C1) >>u C --> (X >>u (C - C1)) & (-1 >> C)
+ Value *NewLShr = Builder.CreateLShr(X, ShiftDiff, "", I.isExact());
+ APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
+ return BinaryOperator::CreateAnd(NewLShr, ConstantInt::get(Ty, Mask));
+ }
+ } else if (C1->ugt(ShAmtC)) {
unsigned ShlAmtC = C1->getZExtValue();
Constant *ShiftDiff = ConstantInt::get(Ty, ShlAmtC - ShAmtC);
if (cast<BinaryOperator>(Op0)->hasNoUnsignedWrap()) {
@@ -1046,15 +1047,18 @@ Instruction *InstCombinerImpl::visitLShr(BinaryOperator &I) {
NewShl->setHasNoUnsignedWrap(true);
return NewShl;
}
- // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
- Value *NewShl = Builder.CreateShl(X, ShiftDiff);
+ if (Op0->hasOneUse()) {
+ // (X << C1) >>u C --> X << (C1 - C) & (-1 >> C)
+ Value *NewShl = Builder.CreateShl(X, ShiftDiff);
+ APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
+ return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
+ }
+ } else {
+ assert(*C1 == ShAmtC);
+ // (X << C) >>u C --> X & (-1 >>u C)
APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
- return BinaryOperator::CreateAnd(NewShl, ConstantInt::get(Ty, Mask));
+ return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
}
- assert(*C1 == ShAmtC);
- // (X << C) >>u C --> X & (-1 >>u C)
- APInt Mask(APInt::getLowBitsSet(BitWidth, BitWidth - ShAmtC));
- return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, Mask));
}
// ((X << C) + Y) >>u C --> (X + (Y >>u C)) & (-1 >>u C)
diff --git a/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll b/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll
index 22952145d54a..265570dfe6d5 100644
--- a/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll
+++ b/llvm/test/Transforms/InstCombine/canonicalize-shl-lshr-to-masking.ll
@@ -310,13 +310,13 @@ define i32 @positive_biggerLshr_shlnuw_multiuse(i32 %x) {
ret i32 %ret
}
-; NOTE: creates one extra instruction, but this seems intentional.
+; negative test - don't create extra instructions
+
define i32 @positive_biggerShl_multiuse_extrainstr(i32 %x) {
; CHECK-LABEL: @positive_biggerShl_multiuse_extrainstr(
; CHECK-NEXT: [[T0:%.*]] = shl i32 [[X:%.*]], 10
; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[TMP1:%.*]] = shl i32 [[X]], 5
-; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], 134217696
+; CHECK-NEXT: [[RET:%.*]] = lshr exact i32 [[T0]], 5
; CHECK-NEXT: ret i32 [[RET]]
;
%t0 = shl i32 %x, 10
@@ -325,13 +325,13 @@ define i32 @positive_biggerShl_multiuse_extrainstr(i32 %x) {
ret i32 %ret
}
-; NOTE: creates one extra instruction, but this seems intentional.
+; negative test - don't create extra instructions
+
define i32 @positive_biggerLshr_multiuse_extrainstr(i32 %x) {
; CHECK-LABEL: @positive_biggerLshr_multiuse_extrainstr(
; CHECK-NEXT: [[T0:%.*]] = shl i32 [[X:%.*]], 5
; CHECK-NEXT: call void @use32(i32 [[T0]])
-; CHECK-NEXT: [[TMP1:%.*]] = lshr i32 [[X]], 5
-; CHECK-NEXT: [[RET:%.*]] = and i32 [[TMP1]], 4194303
+; CHECK-NEXT: [[RET:%.*]] = lshr i32 [[T0]], 10
; CHECK-NEXT: ret i32 [[RET]]
;
%t0 = shl i32 %x, 5
diff --git a/llvm/test/Transforms/InstCombine/shift.ll b/llvm/test/Transforms/InstCombine/shift.ll
index 11d9be073b02..d165c687849c 100644
--- a/llvm/test/Transforms/InstCombine/shift.ll
+++ b/llvm/test/Transforms/InstCombine/shift.ll
@@ -650,7 +650,7 @@ define i8 @test39(i32 %a0) {
; CHECK-NEXT: [[I49:%.*]] = shl i8 [[I4]], 6
; CHECK-NEXT: [[I50:%.*]] = and i8 [[I49]], 64
; CHECK-NEXT: [[I51:%.*]] = xor i8 [[I50]], [[I5]]
-; CHECK-NEXT: [[TMP0:%.*]] = shl i8 [[I4]], 2
+; CHECK-NEXT: [[TMP0:%.*]] = lshr exact i8 [[I5]], 3
; CHECK-NEXT: [[I54:%.*]] = and i8 [[TMP0]], 16
; CHECK-NEXT: [[I551:%.*]] = or i8 [[I54]], [[I51]]
; CHECK-NEXT: ret i8 [[I551]]