[llvm] r285696 - [InstCombine] Folding of shifts by the sum of positive values

Simon Pilgrim via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 1 08:40:31 PDT 2016


Author: rksimon
Date: Tue Nov  1 10:40:30 2016
New Revision: 285696

URL: http://llvm.org/viewvc/llvm-project?rev=285696&view=rev
Log:
[InstCombine] Folding of shifts by the sum of positive values

This patch introduces the combine:

(C1 shift (A add C2)) -> ((C1 shift C2) shift A)
iff A and C2 are both positive

If both A and C2 are known to be positive then we can safely split this into 2 shifts, permitting the folding of the inner shift.

Fixes the SPEC benchmark case mentioned by @nadav on PR15141 (assuming we can prove that the inputs are positive).
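
Worked example, taken from the shl_C1_add_A_C2_i32 test added below: the constant-by-constant shift folds away (6 shl 5 = 192), leaving a single shift by the variable amount.

  ; Before:
  %B = zext i16 %A to i32
  %C = add i32 %B, 5
  %D = shl i32 6, %C

  ; After the combine (as checked by the new test):
  %B = zext i16 %A to i32
  %D = shl i32 192, %B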

Differential Revision: https://reviews.llvm.org/D26000

Added:
    llvm/trunk/test/Transforms/InstCombine/shift-add.ll
Modified:
    llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp

Modified: llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp?rev=285696&r1=285695&r2=285696&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp (original)
+++ llvm/trunk/lib/Transforms/InstCombine/InstCombineShifts.cpp Tue Nov  1 10:40:30 2016
@@ -39,10 +39,19 @@ Instruction *InstCombiner::commonShiftTr
     if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I))
       return Res;
 
+  // (C1 shift (A add C2)) -> ((C1 shift C2) shift A)
+  // iff A and C2 are both positive.
+  Value *A;
+  Constant *C;
+  if (match(Op0, m_Constant()) && match(Op1, m_Add(m_Value(A), m_Constant(C))))
+    if (isKnownNonNegative(A, DL) && isKnownNonNegative(C, DL))
+      return BinaryOperator::Create(
+          I.getOpcode(), Builder->CreateBinOp(I.getOpcode(), Op0, C), A);
+
   // X shift (A srem B) -> X shift (A and B-1) iff B is a power of 2.
   // Because shifts by negative values (which could occur if A were negative)
   // are undefined.
-  Value *A; const APInt *B;
+  const APInt *B;
   if (Op1->hasOneUse() && match(Op1, m_SRem(m_Value(A), m_Power2(B)))) {
     // FIXME: Should this get moved into SimplifyDemandedBits by saying we don't
     // demand the sign bit (and many others) here??

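For contrast, a minimal sketch (not part of this patch; the function name is made up for illustration) of a case the new code deliberately leaves alone: with a plain i32 argument and no range information, isKnownNonNegative cannot prove %A >= 0, so splitting the shift would be unsafe and the guard rejects the match.

  define i32 @shl_unknown_sign(i32 %A) {   ; hypothetical example, not in the test file
    %C = add i32 %A, 5                     ; %A may be negative
    %D = shl i32 6, %C                     ; left as-is: the isKnownNonNegative guard fails
    ret i32 %D
  }
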
Added: llvm/trunk/test/Transforms/InstCombine/shift-add.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/InstCombine/shift-add.ll?rev=285696&view=auto
==============================================================================
--- llvm/trunk/test/Transforms/InstCombine/shift-add.ll (added)
+++ llvm/trunk/test/Transforms/InstCombine/shift-add.ll Tue Nov  1 10:40:30 2016
@@ -0,0 +1,74 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; This test makes sure that these instructions are properly eliminated.
+;
+; RUN: opt < %s -instcombine -S | FileCheck %s
+
+define i32 @shl_C1_add_A_C2_i32(i16 %A) {
+; CHECK-LABEL: @shl_C1_add_A_C2_i32(
+; CHECK-NEXT:    [[B:%.*]] = zext i16 %A to i32
+; CHECK-NEXT:    [[D:%.*]] = shl i32 192, [[B]]
+; CHECK-NEXT:    ret i32 [[D]]
+;
+  %B = zext i16 %A to i32
+  %C = add i32 %B, 5
+  %D = shl i32 6, %C
+  ret i32 %D
+}
+
+define i32 @ashr_C1_add_A_C2_i32(i32 %A) {
+; CHECK-LABEL: @ashr_C1_add_A_C2_i32(
+; CHECK-NEXT:    ret i32 0
+;
+  %B = and i32 %A, 65535
+  %C = add i32 %B, 5
+  %D = ashr i32 6, %C
+  ret i32 %D
+}
+
+define i32 @lshr_C1_add_A_C2_i32(i32 %A) {
+; CHECK-LABEL: @lshr_C1_add_A_C2_i32(
+; CHECK-NEXT:    [[B:%.*]] = and i32 %A, 65535
+; CHECK-NEXT:    [[D:%.*]] = shl i32 192, [[B]]
+; CHECK-NEXT:    ret i32 [[D]]
+;
+  %B = and i32 %A, 65535
+  %C = add i32 %B, 5
+  %D = shl i32 6, %C
+  ret i32 %D
+}
+
+define <4 x i32> @shl_C1_add_A_C2_v4i32(<4 x i16> %A) {
+; CHECK-LABEL: @shl_C1_add_A_C2_v4i32(
+; CHECK-NEXT:    [[B:%.*]] = zext <4 x i16> %A to <4 x i32>
+; CHECK-NEXT:    [[D:%.*]] = shl <4 x i32> <i32 6, i32 4, i32 undef, i32 -458752>, [[B]]
+; CHECK-NEXT:    ret <4 x i32> [[D]]
+;
+  %B = zext <4 x i16> %A to <4 x i32>
+  %C = add <4 x i32> %B, <i32 0, i32 1, i32 50, i32 16>
+  %D = shl <4 x i32> <i32 6, i32 2, i32 1, i32 -7>, %C
+  ret <4 x i32> %D
+}
+
+define <4 x i32> @ashr_C1_add_A_C2_v4i32(<4 x i32> %A) {
+; CHECK-LABEL: @ashr_C1_add_A_C2_v4i32(
+; CHECK-NEXT:    [[B:%.*]] = and <4 x i32> %A, <i32 0, i32 15, i32 255, i32 65535>
+; CHECK-NEXT:    [[D:%.*]] = ashr <4 x i32> <i32 6, i32 1, i32 undef, i32 -1>, [[B]]
+; CHECK-NEXT:    ret <4 x i32> [[D]]
+;
+  %B = and <4 x i32> %A, <i32 0, i32 15, i32 255, i32 65535>
+  %C = add <4 x i32> %B, <i32 0, i32 1, i32 50, i32 16>
+  %D = ashr <4 x i32> <i32 6, i32 2, i32 1, i32 -7>, %C
+  ret <4 x i32> %D
+}
+
+define <4 x i32> @lshr_C1_add_A_C2_v4i32(<4 x i32> %A) {
+; CHECK-LABEL: @lshr_C1_add_A_C2_v4i32(
+; CHECK-NEXT:    [[B:%.*]] = and <4 x i32> %A, <i32 0, i32 15, i32 255, i32 65535>
+; CHECK-NEXT:    [[D:%.*]] = lshr <4 x i32> <i32 6, i32 1, i32 undef, i32 65535>, [[B]]
+; CHECK-NEXT:    ret <4 x i32> [[D]]
+;
+  %B = and <4 x i32> %A, <i32 0, i32 15, i32 255, i32 65535>
+  %C = add <4 x i32> %B, <i32 0, i32 1, i32 50, i32 16>
+  %D = lshr <4 x i32> <i32 6, i32 2, i32 1, i32 -7>, %C
+  ret <4 x i32> %D
+}
