[llvm] r299701 - [SelectionDAG] [ARM CodeGen] Fix chain information of LowerMUL

Huihui Zhang via llvm-commits llvm-commits at lists.llvm.org
Thu Apr 6 13:22:51 PDT 2017


Author: huihuiz
Date: Thu Apr  6 15:22:51 2017
New Revision: 299701

URL: http://llvm.org/viewvc/llvm-project?rev=299701&view=rev
Log:
[SelectionDAG] [ARM CodeGen] Fix chain information of LowerMUL

In LowerMUL, the chain information is not preserved for the newly
created Load SDNode.

For example, if a Store aliases with one of the operands of the Mul,
the Load for that operand needs to be scheduled before the Store.
The dependence is recorded in the chain of Store, in TokenFactor.
However, when lowering MUL, the SDNodes for the new Loads for
VMULL are not updated in the TokenFactor for the Store. Thus the
chain is not preserved for the lowered VMULL.



Added:
    llvm/trunk/test/CodeGen/ARM/lowerMUL-newload.ll
Modified:
    llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp

Modified: llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp?rev=299701&r1=299700&r2=299701&view=diff
==============================================================================
--- llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/ARM/ARMISelLowering.cpp Thu Apr  6 15:22:51 2017
@@ -6989,8 +6989,19 @@ static SDValue SkipExtensionForVMULL(SDN
                                         N->getValueType(0),
                                         N->getOpcode());
 
-  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N))
-    return SkipLoadExtensionForVMULL(LD, DAG);
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+    assert((ISD::isSEXTLoad(LD) || ISD::isZEXTLoad(LD)) &&
+           "Expected extending load");
+
+    SDValue newLoad = SkipLoadExtensionForVMULL(LD, DAG);
+    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 1), newLoad.getValue(1));
+    unsigned Opcode = ISD::isSEXTLoad(LD) ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
+    SDValue extLoad =
+        DAG.getNode(Opcode, SDLoc(newLoad), LD->getValueType(0), newLoad);
+    DAG.ReplaceAllUsesOfValueWith(SDValue(LD, 0), extLoad);
+
+    return newLoad;
+  }
 
   // Otherwise, the value must be a BUILD_VECTOR.  For v2i64, it will
   // have been legalized as a BITCAST from v4i32.

Added: llvm/trunk/test/CodeGen/ARM/lowerMUL-newload.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/ARM/lowerMUL-newload.ll?rev=299701&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/ARM/lowerMUL-newload.ll (added)
+++ llvm/trunk/test/CodeGen/ARM/lowerMUL-newload.ll Thu Apr  6 15:22:51 2017
@@ -0,0 +1,115 @@
+; RUN: llc < %s -mtriple=arm-eabi -mcpu=krait | FileCheck %s
+
+define void @func1(i16* %a, i16* %b, i16* %c) {
+entry:
+; This test case tries to vectorize the pseudo code below.
+; a[i] = b[i] + c[i];
+; b[i] = a[i] * c[i];
+; a[i] = b[i] + a[i] * c[i];
+;
+; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i]" is
+; scheduled before the first vector store to "a[i] = b[i] + c[i]".
+; Checking that there is no vector load a[i] scheduled between the vector
+; stores to a[i], otherwise the load of a[i] will be polluted by the first
+; vector store to a[i].
+;
+; This test case checks that the chain information is updated during
+; lowerMUL for the newly created Load SDNode.
+
+; CHECK: vldr {{.*}} [r0, #16]
+; CHECK: vstr {{.*}} [r0, #16]
+; CHECK-NOT: vldr {{.*}} [r0, #16]
+; CHECK: vstr {{.*}} [r0, #16]
+
+  %scevgep0 = getelementptr i16, i16* %a, i32 8
+  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
+  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
+  %scevgep1 = getelementptr i16, i16* %b, i32 8
+  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
+  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+  %0 = zext <4 x i16> %vec1 to <4 x i32>
+  %scevgep2 = getelementptr i16, i16* %c, i32 8
+  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
+  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+  %1 = sext <4 x i16> %vec2 to <4 x i32>
+  %vec3 = add <4 x i32> %1, %0
+  %2 = trunc <4 x i32> %vec3 to <4 x i16>
+  %scevgep3 = getelementptr i16, i16* %a, i32 8
+  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
+  store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
+  %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
+  %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
+  %3 = sext <4 x i16> %vec4 to <4 x i32>
+  %vec5 = mul <4 x i32> %3, %vec3
+  %4 = trunc <4 x i32> %vec5 to <4 x i16>
+  %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
+  store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
+  %5 = sext <4 x i16> %vec0 to <4 x i32>
+  %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
+  %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
+  %6 = sext <4 x i16> %vec6 to <4 x i32>
+  %vec7 = mul <4 x i32> %6, %5
+  %vec8 = add <4 x i32> %vec7, %vec5
+  %7 = trunc <4 x i32> %vec8 to <4 x i16>
+  %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
+  store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
+  ret void
+}
+
+define void @func2(i16* %a, i16* %b, i16* %c) {
+entry:
+; This test case tries to vectorize the pseudo code below.
+; a[i] = b[i] + c[i];
+; b[i] = a[i] * c[i];
+; a[i] = b[i] + a[i] * c[i] + a[i];
+;
+; Checking that vector load a[i] for "a[i] = b[i] + a[i] * c[i] + a[i]"
+; is scheduled before the first vector store to "a[i] = b[i] + c[i]".
+; Checking that there is no vector load a[i] scheduled between the first
+; vector store to a[i] and the vector add of a[i], otherwise the load of
+; a[i] will be polluted by the first vector store to a[i].
+;
+; This test case checks that both the chain and the value of the newly
+; created Load SDNode are updated during lowerMUL.
+
+; CHECK: vldr {{.*}} [r0, #16]
+; CHECK: vstr {{.*}} [r0, #16]
+; CHECK-NOT: vldr {{.*}} [r0, #16]
+; CHECK: vaddw.s16
+; CHECK: vstr {{.*}} [r0, #16]
+
+  %scevgep0 = getelementptr i16, i16* %a, i32 8
+  %vector_ptr0 = bitcast i16* %scevgep0 to <4 x i16>*
+  %vec0 = load <4 x i16>, <4 x i16>* %vector_ptr0, align 8
+  %scevgep1 = getelementptr i16, i16* %b, i32 8
+  %vector_ptr1 = bitcast i16* %scevgep1 to <4 x i16>*
+  %vec1 = load <4 x i16>, <4 x i16>* %vector_ptr1, align 8
+  %0 = zext <4 x i16> %vec1 to <4 x i32>
+  %scevgep2 = getelementptr i16, i16* %c, i32 8
+  %vector_ptr2 = bitcast i16* %scevgep2 to <4 x i16>*
+  %vec2 = load <4 x i16>, <4 x i16>* %vector_ptr2, align 8
+  %1 = sext <4 x i16> %vec2 to <4 x i32>
+  %vec3 = add <4 x i32> %1, %0
+  %2 = trunc <4 x i32> %vec3 to <4 x i16>
+  %scevgep3 = getelementptr i16, i16* %a, i32 8
+  %vector_ptr3 = bitcast i16* %scevgep3 to <4 x i16>*
+  store <4 x i16> %2, <4 x i16>* %vector_ptr3, align 8
+  %vector_ptr4 = bitcast i16* %scevgep2 to <4 x i16>*
+  %vec4 = load <4 x i16>, <4 x i16>* %vector_ptr4, align 8
+  %3 = sext <4 x i16> %vec4 to <4 x i32>
+  %vec5 = mul <4 x i32> %3, %vec3
+  %4 = trunc <4 x i32> %vec5 to <4 x i16>
+  %vector_ptr5 = bitcast i16* %scevgep1 to <4 x i16>*
+  store <4 x i16> %4, <4 x i16>* %vector_ptr5, align 8
+  %5 = sext <4 x i16> %vec0 to <4 x i32>
+  %vector_ptr6 = bitcast i16* %scevgep2 to <4 x i16>*
+  %vec6 = load <4 x i16>, <4 x i16>* %vector_ptr6, align 8
+  %6 = sext <4 x i16> %vec6 to <4 x i32>
+  %vec7 = mul <4 x i32> %6, %5
+  %vec8 = add <4 x i32> %vec7, %vec5
+  %vec9 = add <4 x i32> %vec8, %5
+  %7 = trunc <4 x i32> %vec9 to <4 x i16>
+  %vector_ptr7 = bitcast i16* %scevgep3 to <4 x i16>*
+  store <4 x i16> %7, <4 x i16>* %vector_ptr7, align 8
+  ret void
+}




More information about the llvm-commits mailing list