[llvm] r291972 - [InstCombine] use m_APInt to allow lshr folds for vectors with splat constants
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Fri Jan 13 15:04:11 PST 2017
Author: spatel
Date: Fri Jan 13 17:04:10 2017
New Revision: 291972
URL: http://llvm.org/viewvc/llvm-project?rev=291972&view=rev
Log:
[InstCombine] use m_APInt to allow lshr folds for vectors with splat constants

Replace the dyn_cast<ConstantInt> guard in visitLShr with a m_APInt pattern
match, so the ctlz/cttz/ctpop shift-to-zext folds and the known-zero "exact"
shift marking also fire when the shift amount is a vector splat constant,
not just a scalar ConstantInt.
Modified:
llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
llvm/trunk/test/Transforms/InstCombine/lshr.ll
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp?rev=291972&r1=291971&r2=291972&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp Fri Jan 13 17:04:10 2017
@@ -774,34 +774,31 @@ Instruction *InstCombiner::visitLShr(Bin
if (Instruction *R = commonShiftTransforms(I))
return R;
- if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
- unsigned ShAmt = Op1C->getZExtValue();
-
- if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Op0)) {
- unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
+ const APInt *ShAmtAPInt;
+ if (match(Op1, m_APInt(ShAmtAPInt))) {
+ unsigned ShAmt = ShAmtAPInt->getZExtValue();
+ unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
+ auto *II = dyn_cast<IntrinsicInst>(Op0);
+ if (II && isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt &&
+ (II->getIntrinsicID() == Intrinsic::ctlz ||
+ II->getIntrinsicID() == Intrinsic::cttz ||
+ II->getIntrinsicID() == Intrinsic::ctpop)) {
// ctlz.i32(x)>>5 --> zext(x == 0)
// cttz.i32(x)>>5 --> zext(x == 0)
// ctpop.i32(x)>>5 --> zext(x == -1)
- if ((II->getIntrinsicID() == Intrinsic::ctlz ||
- II->getIntrinsicID() == Intrinsic::cttz ||
- II->getIntrinsicID() == Intrinsic::ctpop) &&
- isPowerOf2_32(BitWidth) && Log2_32(BitWidth) == ShAmt) {
- bool isCtPop = II->getIntrinsicID() == Intrinsic::ctpop;
- Constant *RHS = ConstantInt::getSigned(Op0->getType(), isCtPop ? -1:0);
- Value *Cmp = Builder->CreateICmpEQ(II->getArgOperand(0), RHS);
- return new ZExtInst(Cmp, II->getType());
- }
+ bool IsPop = II->getIntrinsicID() == Intrinsic::ctpop;
+ Constant *RHS = ConstantInt::getSigned(Op0->getType(), IsPop ? -1 : 0);
+ Value *Cmp = Builder->CreateICmpEQ(II->getArgOperand(0), RHS);
+ return new ZExtInst(Cmp, II->getType());
}
// If the shifted-out value is known-zero, then this is an exact shift.
if (!I.isExact() &&
- MaskedValueIsZero(Op0, APInt::getLowBitsSet(Op1C->getBitWidth(), ShAmt),
- 0, &I)){
+ MaskedValueIsZero(Op0, APInt::getLowBitsSet(BitWidth, ShAmt), 0, &I)) {
I.setIsExact();
return &I;
}
}
-
return nullptr;
}
Modified: llvm/trunk/test/Transforms/InstCombine/lshr.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/lshr.ll?rev=291972&r1=291971&r2=291972&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/lshr.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/lshr.ll Fri Jan 13 17:04:10 2017
@@ -43,8 +43,8 @@ define i32 @lshr_ctpop(i32 %x) {
define <2 x i8> @lshr_ctlz_zero_is_not_undef_splat_vec(<2 x i8> %x) {
; CHECK-LABEL: @lshr_ctlz_zero_is_not_undef_splat_vec(
-; CHECK-NEXT: [[CT:%.*]] = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %x, i1 false)
-; CHECK-NEXT: [[SH:%.*]] = lshr <2 x i8> [[CT]], <i8 3, i8 3>
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT: [[SH:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
; CHECK-NEXT: ret <2 x i8> [[SH]]
;
%ct = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %x, i1 false)
@@ -54,8 +54,8 @@ define <2 x i8> @lshr_ctlz_zero_is_not_u
define <2 x i8> @lshr_cttz_zero_is_not_undef_splat_vec(<2 x i8> %x) {
; CHECK-LABEL: @lshr_cttz_zero_is_not_undef_splat_vec(
-; CHECK-NEXT: [[CT:%.*]] = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 false)
-; CHECK-NEXT: [[SH:%.*]] = lshr <2 x i8> [[CT]], <i8 3, i8 3>
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, zeroinitializer
+; CHECK-NEXT: [[SH:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
; CHECK-NEXT: ret <2 x i8> [[SH]]
;
%ct = call <2 x i8> @llvm.cttz.v2i8(<2 x i8> %x, i1 false)
@@ -65,8 +65,8 @@ define <2 x i8> @lshr_cttz_zero_is_not_u
define <2 x i8> @lshr_ctpop_splat_vec(<2 x i8> %x) {
; CHECK-LABEL: @lshr_ctpop_splat_vec(
-; CHECK-NEXT: [[CT:%.*]] = call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> %x)
-; CHECK-NEXT: [[SH:%.*]] = lshr <2 x i8> [[CT]], <i8 3, i8 3>
+; CHECK-NEXT: [[TMP1:%.*]] = icmp eq <2 x i8> %x, <i8 -1, i8 -1>
+; CHECK-NEXT: [[SH:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i8>
; CHECK-NEXT: ret <2 x i8> [[SH]]
;
%ct = call <2 x i8> @llvm.ctpop.v2i8(<2 x i8> %x)
@@ -91,7 +91,7 @@ define <2 x i8> @lshr_exact_splat_vec(<2
; CHECK-LABEL: @lshr_exact_splat_vec(
; CHECK-NEXT: [[SHL:%.*]] = shl <2 x i8> %x, <i8 2, i8 2>
; CHECK-NEXT: [[ADD:%.*]] = add <2 x i8> [[SHL]], <i8 4, i8 4>
-; CHECK-NEXT: [[LSHR:%.*]] = lshr <2 x i8> [[ADD]], <i8 2, i8 2>
+; CHECK-NEXT: [[LSHR:%.*]] = lshr exact <2 x i8> [[ADD]], <i8 2, i8 2>
; CHECK-NEXT: ret <2 x i8> [[LSHR]]
;
%shl = shl <2 x i8> %x, <i8 2, i8 2>
More information about the llvm-commits mailing list