[llvm] r245689 - [DAGCombiner] Fold together mul and shl when both are by a constant

John Brawn via llvm-commits llvm-commits at lists.llvm.org
Fri Aug 21 03:48:17 PDT 2015


Author: john.brawn
Date: Fri Aug 21 05:48:17 2015
New Revision: 245689

URL: http://llvm.org/viewvc/llvm-project?rev=245689&view=rev
Log:
[DAGCombiner] Fold together mul and shl when both are by a constant

This is intended to improve code generation for GEPs: the index value is
scaled by the element size (emitted as a left shift when the size is a power
of two), and in GEPs of multi-dimensional arrays the index of a higher
dimension is additionally multiplied by the size of the lower dimensions.

Differential Revision: http://reviews.llvm.org/D12197
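
The fold relies on the identity (x * c1) << c2 == x * (c1 << c2), which holds
in wrapping 2^N arithmetic. As an illustration of the GEP motivation, here is
a standalone C++ sketch (not part of the patch; the array shape and names are
hypothetical):

  // For 'int a[100][21]', the address of a[n][0] is computed as base plus
  // n*21 elements, i.e. base + ((n*21) << 2) bytes for 4-byte i32 elements.
  // Folding the shift into the multiply leaves a single multiply by 84.
  #include <cassert>
  #include <cstdint>

  uint32_t rowAddr(uint32_t base, uint32_t n) {
    uint32_t mulThenShl = base + ((n * 21u) << 2); // before the combine
    uint32_t singleMul  = base + n * 84u;          // after the combine
    assert(mulThenShl == singleMul);               // equal modulo 2^32
    return singleMul;
  }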

Added:
    llvm/trunk/test/CodeGen/ARM/gep-optimization.ll
Modified:
    llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

Modified: llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp?rev=245689&r1=245688&r2=245689&view=diff
==============================================================================
--- llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp (original)
+++ llvm/trunk/lib/CodeGen/SelectionDAG/DAGCombiner.cpp Fri Aug 21 05:48:17 2015
@@ -4455,6 +4455,14 @@ SDValue DAGCombiner::visitSHL(SDNode *N)
     return DAG.getNode(ISD::ADD, SDLoc(N), VT, Shl0, Shl1);
   }
 
+  // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
+  if (N1C && N0.getOpcode() == ISD::MUL && N0.getNode()->hasOneUse()) {
+    if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
+      SDValue Folded = DAG.FoldConstantArithmetic(ISD::SHL, SDLoc(N1), VT, N0C1, N1C);
+      return DAG.getNode(ISD::MUL, SDLoc(N), VT, N0.getOperand(0), Folded);
+    }
+  }
+
   if (N1C && !N1C->isOpaque())
     if (SDValue NewSHL = visitShiftByConstant(N, N1C))
       return NewSHL;
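
The new case fires only when both the shift amount (N1C) and the multiply's
second operand are constants (or constant splats, via isConstOrConstSplat),
and when the mul has no other uses. A quick standalone check of the identity
in wrapping 32-bit arithmetic (illustrative C++ only, not LLVM code):

  #include <cassert>
  #include <cstdint>

  int main() {
    // (x * c1) << c2 == x * (c1 << c2) holds modulo 2^32 for any shift
    // amount below the bit width: both sides equal x * c1 * 2^c2 mod 2^32.
    for (uint32_t x : {0u, 1u, 7u, 1000u, 0xFFFFFFFFu})
      for (uint32_t c2 = 0; c2 < 32; ++c2)
        assert(((x * 21u) << c2) == x * (21u << c2));
    return 0;
  }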

Added: llvm/trunk/test/CodeGen/ARM/gep-optimization.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/gep-optimization.ll?rev=245689&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/gep-optimization.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/gep-optimization.ll Fri Aug 21 05:48:17 2015
@@ -0,0 +1,77 @@
+; RUN: llc < %s -mtriple=armv7a-eabi   | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-AT2
+; RUN: llc < %s -mtriple=thumbv7m-eabi | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-AT2
+; RUN: llc < %s -mtriple=thumbv6m-eabi | FileCheck %s -check-prefix=CHECK -check-prefix=CHECK-T1
+
+; This test checks that various kinds of getelementptr are all optimised to a
+; simple multiply plus add, with the add folded into a register-offset
+; addressing mode when the result is used by a load.
+
+; CHECK-LABEL: calc_1d:
+; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
+; CHECK-AT2: mla r0, r1, [[REG1]], r0
+; CHECK-T1: muls [[REG2:r[0-9]+]], r1, [[REG1]]
+; CHECK-T1: adds r0, r0, [[REG2]]
+define i32* @calc_1d(i32* %p, i32 %n) {
+entry:
+  %mul = mul nsw i32 %n, 21
+  %add.ptr = getelementptr inbounds i32, i32* %p, i32 %mul
+  ret i32* %add.ptr
+}
+
+; CHECK-LABEL: load_1d:
+; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
+; CHECK: mul{{s?}} [[REG2:r[0-9]+]],{{( r1,)?}} [[REG1]]{{(, r1)?}}
+; CHECK: ldr r0, [r0, [[REG2]]]
+define i32 @load_1d(i32* %p, i32 %n) {
+entry:
+  %mul = mul nsw i32 %n, 21
+  %arrayidx = getelementptr inbounds i32, i32* %p, i32 %mul
+  %0 = load i32, i32* %arrayidx, align 4
+  ret i32 %0
+}
+
+; CHECK-LABEL: calc_2d_a:
+; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
+; CHECK-AT2: mla r0, r1, [[REG1]], r0
+; CHECK-T1: muls [[REG2:r[0-9]+]], r1, [[REG1]]
+; CHECK-T1: adds r0, r0, [[REG2]]
+define i32* @calc_2d_a([100 x i32]* %p, i32 %n) {
+entry:
+  %mul = mul nsw i32 %n, 21
+  %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %p, i32 0, i32 %mul
+  ret i32* %arrayidx1
+}
+
+; CHECK-LABEL: load_2d_a:
+; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
+; CHECK: mul{{s?}} [[REG2:r[0-9]+]],{{( r1,)?}} [[REG1]]{{(, r1)?}}
+; CHECK: ldr r0, [r0, [[REG2]]]
+define i32 @load_2d_a([100 x i32]* %p, i32 %n) {
+entry:
+  %mul = mul nsw i32 %n, 21
+  %arrayidx1 = getelementptr inbounds [100 x i32], [100 x i32]* %p, i32 0, i32 %mul
+  %0 = load i32, i32* %arrayidx1, align 4
+  ret i32 %0
+}
+
+; CHECK-LABEL: calc_2d_b:
+; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
+; CHECK-AT2: mla r0, r1, [[REG1]], r0
+; CHECK-T1: muls [[REG2:r[0-9]+]], r1, [[REG1]]
+; CHECK-T1: adds r0, r0, [[REG2]]
+define i32* @calc_2d_b([21 x i32]* %p, i32 %n) {
+entry:
+  %arrayidx1 = getelementptr inbounds [21 x i32], [21 x i32]* %p, i32 %n, i32 0
+  ret i32* %arrayidx1
+}
+
+; CHECK-LABEL: load_2d_b:
+; CHECK: mov{{s?}} [[REG1:r[0-9]+]], #84
+; CHECK: mul{{s?}} [[REG2:r[0-9]+]],{{( r1,)?}} [[REG1]]{{(, r1)?}}
+; CHECK: ldr r0, [r0, [[REG2]]]
+define i32 @load_2d_b([21 x i32]* %p, i32 %n) {
+entry:
+  %arrayidx1 = getelementptr inbounds [21 x i32], [21 x i32]* %p, i32 %n, i32 0
+  %0 = load i32, i32* %arrayidx1, align 4
+  ret i32 %0
+}
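
All six functions check for the same constant: the IR multiplies the index by
21 and the GEP scales it by the 4-byte i32 element size, so after the combine
a single multiply by 84 remains, materialized on ARM as a mov of #84 feeding
mla (ARM/Thumb2) or muls plus adds (Thumb1). Hypothetical C++ equivalents of
the IR functions (an assumption for illustration; the commit does not include
the original source):

  int *calc_1d(int *p, int n)         { return p + n * 21; } // mul only
  int  load_1d(int *p, int n)         { return p[n * 21]; }  // mul + reg-offset ldr
  int *calc_2d_b(int (*p)[21], int n) { return p[n]; }       // row address, same math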
