[llvm] r355118 - [InstCombine] fold adds of constants separated by sext/zext
Sanjay Patel via llvm-commits
llvm-commits at lists.llvm.org
Thu Feb 28 11:05:26 PST 2019
Author: spatel
Date: Thu Feb 28 11:05:26 2019
New Revision: 355118
URL: http://llvm.org/viewvc/llvm-project?rev=355118&view=rev
Log:
[InstCombine] fold adds of constants separated by sext/zext
This is part of a transform that may be done in the backend (D13757),
but it should always be beneficial to fold this sooner in IR
for all targets.
https://rise4fun.com/Alive/vaiW
Name: sext add nsw
%add = add nsw i8 %i, C0
%ext = sext i8 %add to i32
%r = add i32 %ext, C1
=>
%s = sext i8 %i to i32
%r = add i32 %s, sext(C0)+C1
Name: zext add nuw
%add = add nuw i8 %i, C0
%ext = zext i8 %add to i16
%r = add i16 %ext, C1
=>
%s = zext i8 %i to i16
%r = add i16 %s, zext(C0)+C1
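As a concrete illustration (mirroring the add_nsw_sext_add test updated
below), the two constants are combined in the wide type. The value names
are only illustrative; this is a sketch of the expected result, not
verbatim optimizer output:

Before:
  %add = add nsw i8 %x, 42
  %ext = sext i8 %add to i32
  %r = add i32 %ext, 356

After (sext(42) + 356 = 398):
  %s = sext i8 %x to i32
  %r = add i32 %s, 398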
Modified:
llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
llvm/trunk/test/Transforms/InstCombine/add.ll
Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp?rev=355118&r1=355117&r2=355118&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineAddSub.cpp Thu Feb 28 11:05:26 2019
@@ -822,6 +822,47 @@ static Value *checkForNegativeOperand(Bi
return nullptr;
}
+/// Wrapping flags may allow combining constants separated by an extend.
+static Instruction *foldNoWrapAdd(BinaryOperator &Add,
+ InstCombiner::BuilderTy &Builder) {
+ Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
+ Type *Ty = Add.getType();
+ Constant *Op1C;
+ if (!match(Op1, m_Constant(Op1C)))
+ return nullptr;
+
+ // Try this match first because it results in an add in the narrow type.
+ // (zext (X +nuw C2)) + C1 --> zext (X + (C2 + trunc(C1)))
+ Value *X;
+ const APInt *C1, *C2;
+ if (match(Op1, m_APInt(C1)) &&
+ match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
+ C1->isNegative() && C1->sge(-C2->sext(C1->getBitWidth()))) {
+ Constant *NewC =
+ ConstantInt::get(X->getType(), *C2 + C1->trunc(C2->getBitWidth()));
+ return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
+ }
+
+ // More general combining of constants in the wide type.
+ // (sext (X +nsw NarrowC)) + C --> (sext X) + (sext(NarrowC) + C)
+ Constant *NarrowC;
+ if (match(Op0, m_OneUse(m_SExt(m_NSWAdd(m_Value(X), m_Constant(NarrowC)))))) {
+ Constant *WideC = ConstantExpr::getSExt(NarrowC, Ty);
+ Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
+ Value *WideX = Builder.CreateSExt(X, Ty);
+ return BinaryOperator::CreateAdd(WideX, NewC);
+ }
+ // (zext (X +nuw NarrowC)) + C --> (zext X) + (zext(NarrowC) + C)
+ if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_Constant(NarrowC)))))) {
+ Constant *WideC = ConstantExpr::getZExt(NarrowC, Ty);
+ Constant *NewC = ConstantExpr::getAdd(WideC, Op1C);
+ Value *WideX = Builder.CreateZExt(X, Ty);
+ return BinaryOperator::CreateAdd(WideX, NewC);
+ }
+
+ return nullptr;
+}
+
Instruction *InstCombiner::foldAddWithConstant(BinaryOperator &Add) {
Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1);
Constant *Op1C;
@@ -870,14 +911,6 @@ Instruction *InstCombiner::foldAddWithCo
C2->isMinSignedValue() && C2->sext(Ty->getScalarSizeInBits()) == *C)
return CastInst::Create(Instruction::SExt, X, Ty);
- // (add (zext (add nuw X, C2)), C) --> (zext (add nuw X, C2 + C))
- if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) &&
- C->isNegative() && C->sge(-C2->sext(C->getBitWidth()))) {
- Constant *NewC =
- ConstantInt::get(X->getType(), *C2 + C->trunc(C2->getBitWidth()));
- return new ZExtInst(Builder.CreateNUWAdd(X, NewC), Ty);
- }
-
if (C->isOneValue() && Op0->hasOneUse()) {
// add (sext i1 X), 1 --> zext (not X)
// TODO: The smallest IR representation is (select X, 0, 1), and that would
@@ -1050,6 +1083,9 @@ Instruction *InstCombiner::visitAdd(Bina
if (Instruction *X = foldAddWithConstant(I))
return X;
+ if (Instruction *X = foldNoWrapAdd(I, Builder))
+ return X;
+
// FIXME: This should be moved into the above helper function to allow these
// transforms for general constant or constant splat vectors.
Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
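For reference, a hypothetical example of the first (narrow-type) pattern
handled by the new helper, i.e. the zext/nuw fold relocated out of
foldAddWithConstant; the constants and value names below are illustrative
and are not taken from the patch's tests. Because the wide constant is
negative and does not cancel more than the nuw guarantee allows, the add
can stay in the narrow type:

  %add = add nuw i8 %x, 42
  %ext = zext i8 %add to i64
  %r = add i64 %ext, -14
=>
  %narrow = add nuw i8 %x, 28    ; 42 + trunc(-14) = 28
  %r = zext i8 %narrow to i64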
Modified: llvm/trunk/test/Transforms/InstCombine/add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/add.ll?rev=355118&r1=355117&r2=355118&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/add.ll (original)
+++ llvm/trunk/test/Transforms/InstCombine/add.ll Thu Feb 28 11:05:26 2019
@@ -396,9 +396,8 @@ define i8 @add_nuw_signbit(i8 %x) {
define i32 @add_nsw_sext_add(i8 %x) {
; CHECK-LABEL: @add_nsw_sext_add(
-; CHECK-NEXT: [[ADD:%.*]] = add nsw i8 [[X:%.*]], 42
-; CHECK-NEXT: [[EXT:%.*]] = sext i8 [[ADD]] to i32
-; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[EXT]], 356
+; CHECK-NEXT: [[TMP1:%.*]] = sext i8 [[X:%.*]] to i32
+; CHECK-NEXT: [[R:%.*]] = add nsw i32 [[TMP1]], 398
; CHECK-NEXT: ret i32 [[R]]
;
%add = add nsw i8 %x, 42
@@ -407,6 +406,8 @@ define i32 @add_nsw_sext_add(i8 %x) {
ret i32 %r
}
+; Negative test - an extra use of the sext would mean an increase in instruction count.
+
define i32 @add_nsw_sext_add_extra_use_1(i8 %x, i32* %p) {
; CHECK-LABEL: @add_nsw_sext_add_extra_use_1(
; CHECK-NEXT: [[ADD:%.*]] = add nsw i8 [[X:%.*]], 42
@@ -426,8 +427,8 @@ define <2 x i32> @add_nsw_sext_add_vec_e
; CHECK-LABEL: @add_nsw_sext_add_vec_extra_use_2(
; CHECK-NEXT: [[ADD:%.*]] = add nsw <2 x i8> [[X:%.*]], <i8 42, i8 -5>
; CHECK-NEXT: store <2 x i8> [[ADD]], <2 x i8>* [[P:%.*]], align 2
-; CHECK-NEXT: [[EXT:%.*]] = sext <2 x i8> [[ADD]] to <2 x i32>
-; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[EXT]], <i32 356, i32 12>
+; CHECK-NEXT: [[TMP1:%.*]] = sext <2 x i8> [[X]] to <2 x i32>
+; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 398, i32 7>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%add = add nsw <2 x i8> %x, <i8 42, i8 -5>
@@ -439,9 +440,8 @@ define <2 x i32> @add_nsw_sext_add_vec_e
define <2 x i32> @add_nuw_zext_add_vec(<2 x i16> %x) {
; CHECK-LABEL: @add_nuw_zext_add_vec(
-; CHECK-NEXT: [[ADD:%.*]] = add nuw <2 x i16> [[X:%.*]], <i16 -42, i16 5>
-; CHECK-NEXT: [[EXT:%.*]] = zext <2 x i16> [[ADD]] to <2 x i32>
-; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[EXT]], <i32 356, i32 -12>
+; CHECK-NEXT: [[TMP1:%.*]] = zext <2 x i16> [[X:%.*]] to <2 x i32>
+; CHECK-NEXT: [[R:%.*]] = add nsw <2 x i32> [[TMP1]], <i32 65850, i32 -7>
; CHECK-NEXT: ret <2 x i32> [[R]]
;
%add = add nuw <2 x i16> %x, <i16 -42, i16 5>
@@ -450,6 +450,8 @@ define <2 x i32> @add_nuw_zext_add_vec(<
ret <2 x i32> %r
}
+; Negative test - an extra use of the zext would mean an increase in instruction count.
+
define i64 @add_nuw_zext_add_extra_use_1(i8 %x, i64* %p) {
; CHECK-LABEL: @add_nuw_zext_add_extra_use_1(
; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[X:%.*]], 42
@@ -469,8 +471,8 @@ define i64 @add_nuw_zext_add_extra_use_2
; CHECK-LABEL: @add_nuw_zext_add_extra_use_2(
; CHECK-NEXT: [[ADD:%.*]] = add nuw i8 [[X:%.*]], 42
; CHECK-NEXT: store i8 [[ADD]], i8* [[P:%.*]], align 1
-; CHECK-NEXT: [[EXT:%.*]] = zext i8 [[ADD]] to i64
-; CHECK-NEXT: [[R:%.*]] = add nuw nsw i64 [[EXT]], -356
+; CHECK-NEXT: [[TMP1:%.*]] = zext i8 [[X]] to i64
+; CHECK-NEXT: [[R:%.*]] = add nuw nsw i64 [[TMP1]], -314
; CHECK-NEXT: ret i64 [[R]]
;
%add = add nuw i8 %x, 42