[llvm] r340037 - [PowerPC] Generate lxsd instead of the ld->mtvsrd sequence for vector loads

Stefan Pintilie via llvm-commits llvm-commits@lists.llvm.org
Fri Aug 17 08:15:26 PDT 2018


Author: stefanp
Date: Fri Aug 17 08:15:26 2018
New Revision: 340037

URL: http://llvm.org/viewvc/llvm-project?rev=340037&view=rev
Log:
[PowerPC] Generate lxsd instead of the ld->mtvsrd sequence for vector loads

This patch addresses:

- An implementation in PPCISelLowering.cpp that checks whether we should use a
direct load into vector instruction (such as lxsd/lfd) when a scalar_to_vector
node is used. This lets us catch as many scalar_to_vector cases as possible and
translate the ld->mtvsrd sequence into lxsd (see the IR sketch below this list).

- Test cases to exhibit the behaviour of emitting lxsd/lfd.
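
For illustration only (this example is not part of the committed test case), a
reduced IR pattern of the kind the new check targets looks like the following;
the function name and the i64* pointer argument are illustrative, assuming a
powerpc64le target with -mcpu=pwr9:

    ; Hypothetical reduced example. The only use of %val is the insertelement
    ; into element 0, which becomes a scalar_to_vector node in the SelectionDAG,
    ; so the load qualifies for the direct load-to-vector form (lxsd/lfd)
    ; instead of the ld + mtvsrd sequence.
    define <2 x i64> @load_feeds_scalar_to_vector(i64* %p) {
    entry:
      %val = load i64, i64* %p
      %vec = insertelement <2 x i64> undef, i64 %val, i32 0
      ret <2 x i64> %vec
    }

Loads with additional scalar uses are deliberately left as a load plus direct
move so the value is not loaded twice; that is what the usePartialVectorLoads
check in the diff below enforces.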

Patch by amyk

Differential revision: https://reviews.llvm.org/D49698

Added:
    llvm/trunk/test/CodeGen/PowerPC/pre-inc-disable.ll
Modified:
    llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp

Modified: llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp?rev=340037&r1=340036&r2=340037&view=diff
==============================================================================
--- llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp (original)
+++ llvm/trunk/lib/Target/PowerPC/PPCISelLowering.cpp Fri Aug 17 08:15:26 2018
@@ -2402,6 +2402,28 @@ bool PPCTargetLowering::SelectAddressReg
   return true;
 }
 
+/// Returns true if we should use a direct load into vector instruction
+/// (such as lxsd or lfd), instead of a load into gpr + direct move sequence.
+static bool usePartialVectorLoads(SDNode *N) {
+  if (!N->hasOneUse())
+    return false;
+
+  // If there are any other uses other than scalar to vector, then we should
+  // keep it as a scalar load -> direct move pattern to prevent multiple
+  // loads.  Currently, only check for i64 since we have lxsd/lfd to do this
+  // efficiently, but no update equivalent.
+  if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
+    EVT MemVT = LD->getMemoryVT();
+    if (MemVT.isSimple() && MemVT.getSimpleVT().SimpleTy == MVT::i64) {
+      SDNode *User = *(LD->use_begin());
+      if (User->getOpcode() == ISD::SCALAR_TO_VECTOR)
+        return true;
+    }
+  }
+
+  return false;
+}
+
 /// getPreIndexedAddressParts - returns true by value, base pointer and
 /// offset pointer and addressing mode by reference if the node's address
 /// can be legally represented as pre-indexed load / store address.
@@ -2427,6 +2449,13 @@ bool PPCTargetLowering::getPreIndexedAdd
   } else
     return false;
 
+  // Do not generate pre-inc forms for specific loads that feed scalar_to_vector
+  // instructions because we can fold these into a more efficient instruction
+  // instead, (such as LXSD).
+  if (isLoad && usePartialVectorLoads(N)) {
+    return false;
+  }
+
   // PowerPC doesn't have preinc load/store instructions for vectors (except
   // for QPX, which does have preinc r+r forms).
   if (VT.isVector()) {

Added: llvm/trunk/test/CodeGen/PowerPC/pre-inc-disable.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/PowerPC/pre-inc-disable.ll?rev=340037&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/PowerPC/pre-inc-disable.ll (added)
+++ llvm/trunk/test/CodeGen/PowerPC/pre-inc-disable.ll Fri Aug 17 08:15:26 2018
@@ -0,0 +1,274 @@
+; RUN: llc -mcpu=pwr9 -O3 -verify-machineinstrs -ppc-vsr-nums-as-vr \
+; RUN:     -ppc-asm-full-reg-names -mtriple=powerpc64le-unknown-linux-gnu \
+; RUN:     < %s | FileCheck %s
+
+; RUN: llc -mcpu=pwr9 -O3 -verify-machineinstrs -ppc-vsr-nums-as-vr \
+; RUN:     -ppc-asm-full-reg-names -mtriple=powerpc64-unknown-linux-gnu \
+; RUN:     < %s | FileCheck %s --check-prefix=P9BE
+
+; Function Attrs: norecurse nounwind readonly
+define signext i32 @test_pre_inc_disable_1(i8* nocapture readonly %pix1, i32 signext %i_stride_pix1, i8* nocapture readonly %pix2) {
+; CHECK-LABEL: test_pre_inc_disable_1:
+; CHECK:   # %bb.0: # %entry
+; CHECK:    addis r6, r2
+; CHECK:    addis r7, r2,
+; CHECK:    lfd f0, 0(r5)
+; CHECK:    xxlxor v4, v4, v4
+; CHECK:    addi r5, r6,
+; CHECK:    addi r6, r7,
+; CHECK:    lxvx v2, 0, r5
+; CHECK:    lxvx v3, 0, r6
+; CHECK:    xxpermdi v5, f0, f0, 2
+; CHECK:    vperm v0, v4, v5, v2
+; CHECK:    vperm v5, v5, v4, v3
+; CHECK:    xvnegsp v5, v5
+; CHECK:    xvnegsp v0, v0
+
+; CHECK:  .LBB0_1: # %for.cond1.preheader
+; CHECK:    lfd f0, 0(r3)
+; CHECK:    xxpermdi v1, f0, f0, 2
+; CHECK:    vperm v6, v1, v4, v3
+; CHECK:    vperm v1, v4, v1, v2
+; CHECK:    xvnegsp v6, v6
+; CHECK:    xvnegsp v1, v1
+; CHECK:    vabsduw v1, v1, v0
+; CHECK:    vabsduw v6, v6, v5
+; CHECK:    vadduwm v1, v6, v1
+; CHECK:    xxswapd v6, v1
+; CHECK:    vadduwm v1, v1, v6
+; CHECK:    xxspltw v6, v1, 2
+; CHECK:    vadduwm v1, v1, v6
+; CHECK:    vextuwrx r7, r6, v1
+; CHECK:    ldux r8, r3, r4
+; CHECK:    add r3, r3, r4
+; CHECK:    add r5, r7, r5
+; CHECK:    mtvsrd f0, r8
+; CHECK:    xxswapd v1, vs0
+; CHECK:    vperm v6, v1, v4, v3
+; CHECK:    vperm v1, v4, v1, v2
+; CHECK:    xvnegsp v6, v6
+; CHECK:    xvnegsp v1, v1
+; CHECK:    vabsduw v1, v1, v0
+; CHECK:    vabsduw v6, v6, v5
+; CHECK:    vadduwm v1, v6, v1
+; CHECK:    xxswapd v6, v1
+; CHECK:    vadduwm v1, v1, v6
+; CHECK:    xxspltw v6, v1, 2
+; CHECK:    vadduwm v1, v1, v6
+; CHECK:    vextuwrx r8, r6, v1
+; CHECK:    add r5, r8, r5
+; CHECK:    bdnz .LBB0_1
+; CHECK:    extsw r3, r5
+; CHECK:    blr
+
+; P9BE-LABEL: test_pre_inc_disable_1:
+; P9BE:    addis r6, r2,
+; P9BE:    addis r7, r2,
+; P9BE:    lfd f0, 0(r5)
+; P9BE:    xxlxor v4, v4, v4
+; P9BE:    addi r5, r6,
+; P9BE:    addi r6, r7,
+; P9BE:    lxvx v2, 0, r5
+; P9BE:    lxvx v3, 0, r6
+; P9BE:    xxlor v5, vs0, vs0
+; P9BE:    li r6, 0
+; P9BE:    vperm v0, v4, v5, v2
+; P9BE:    vperm v5, v4, v5, v3
+; P9BE:    xvnegsp v5, v5
+; P9BE:    xvnegsp v0, v0
+
+; P9BE:  .LBB0_1: # %for.cond1.preheader
+; P9BE:    lfd f0, 0(r3)
+; P9BE:    xxlor v1, vs0, vs0
+; P9BE:    vperm v6, v4, v1, v3
+; P9BE:    vperm v1, v4, v1, v2
+; P9BE:    xvnegsp v6, v6
+; P9BE:    xvnegsp v1, v1
+; P9BE:    vabsduw v1, v1, v0
+; P9BE:    vabsduw v6, v6, v5
+; P9BE:    vadduwm v1, v6, v1
+; P9BE:    xxswapd v6, v1
+; P9BE:    vadduwm v1, v1, v6
+; P9BE:    xxspltw v6, v1, 1
+; P9BE:    vadduwm v1, v1, v6
+; P9BE:    vextuwlx r7, r6, v1
+; P9BE:    ldux r8, r3, r4
+; P9BE:    add r3, r3, r4
+; P9BE:    add r5, r7, r5
+; P9BE:    mtvsrd v1, r8
+; P9BE:    vperm v6, v4, v1, v3
+; P9BE:    vperm v1, v4, v1, v2
+; P9BE:    xvnegsp v6, v6
+; P9BE:    xvnegsp v1, v1
+; P9BE:    vabsduw v1, v1, v0
+; P9BE:    vabsduw v6, v6, v5
+; P9BE:    vadduwm v1, v6, v1
+; P9BE:    xxswapd v6, v1
+; P9BE:    vadduwm v1, v1, v6
+; P9BE:    xxspltw v6, v1, 1
+; P9BE:    vadduwm v1, v1, v6
+; P9BE:    vextuwlx r8, r6, v1
+; P9BE:    add r5, r8, r5
+; P9BE:    bdnz .LBB0_1
+; P9BE:    extsw r3, r5
+; P9BE:    blr
+entry:
+  %idx.ext = sext i32 %i_stride_pix1 to i64
+  %0 = bitcast i8* %pix2 to <8 x i8>*
+  %1 = load <8 x i8>, <8 x i8>* %0, align 1
+  %2 = zext <8 x i8> %1 to <8 x i32>
+  br label %for.cond1.preheader
+
+for.cond1.preheader:                              ; preds = %for.cond1.preheader, %entry
+  %y.024 = phi i32 [ 0, %entry ], [ %inc9.1, %for.cond1.preheader ]
+  %i_sum.023 = phi i32 [ 0, %entry ], [ %op.extra.1, %for.cond1.preheader ]
+  %pix1.addr.022 = phi i8* [ %pix1, %entry ], [ %add.ptr.1, %for.cond1.preheader ]
+  %3 = bitcast i8* %pix1.addr.022 to <8 x i8>*
+  %4 = load <8 x i8>, <8 x i8>* %3, align 1
+  %5 = zext <8 x i8> %4 to <8 x i32>
+  %6 = sub nsw <8 x i32> %5, %2
+  %7 = icmp slt <8 x i32> %6, zeroinitializer
+  %8 = sub nsw <8 x i32> zeroinitializer, %6
+  %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
+  %rdx.shuf = shufflevector <8 x i32> %9, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx = add nsw <8 x i32> %9, %rdx.shuf
+  %rdx.shuf32 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx33 = add nsw <8 x i32> %bin.rdx, %rdx.shuf32
+  %rdx.shuf34 = shufflevector <8 x i32> %bin.rdx33, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx35 = add nsw <8 x i32> %bin.rdx33, %rdx.shuf34
+  %10 = extractelement <8 x i32> %bin.rdx35, i32 0
+  %op.extra = add nsw i32 %10, %i_sum.023
+  %add.ptr = getelementptr inbounds i8, i8* %pix1.addr.022, i64 %idx.ext
+  %11 = bitcast i8* %add.ptr to <8 x i8>*
+  %12 = load <8 x i8>, <8 x i8>* %11, align 1
+  %13 = zext <8 x i8> %12 to <8 x i32>
+  %14 = sub nsw <8 x i32> %13, %2
+  %15 = icmp slt <8 x i32> %14, zeroinitializer
+  %16 = sub nsw <8 x i32> zeroinitializer, %14
+  %17 = select <8 x i1> %15, <8 x i32> %16, <8 x i32> %14
+  %rdx.shuf.1 = shufflevector <8 x i32> %17, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx.1 = add nsw <8 x i32> %17, %rdx.shuf.1
+  %rdx.shuf32.1 = shufflevector <8 x i32> %bin.rdx.1, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx33.1 = add nsw <8 x i32> %bin.rdx.1, %rdx.shuf32.1
+  %rdx.shuf34.1 = shufflevector <8 x i32> %bin.rdx33.1, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx35.1 = add nsw <8 x i32> %bin.rdx33.1, %rdx.shuf34.1
+  %18 = extractelement <8 x i32> %bin.rdx35.1, i32 0
+  %op.extra.1 = add nsw i32 %18, %op.extra
+  %add.ptr.1 = getelementptr inbounds i8, i8* %add.ptr, i64 %idx.ext
+  %inc9.1 = add nuw nsw i32 %y.024, 2
+  %exitcond.1 = icmp eq i32 %inc9.1, 8
+  br i1 %exitcond.1, label %for.cond.cleanup, label %for.cond1.preheader
+
+for.cond.cleanup:                                 ; preds = %for.cond1.preheader
+  ret i32 %op.extra.1
+}
+
+; Function Attrs: norecurse nounwind readonly
+define signext i32 @test_pre_inc_disable_2(i8* nocapture readonly %pix1, i8* nocapture readonly %pix2) {
+; CHECK-LABEL: test_pre_inc_disable_2:
+; CHECK:    addis r5, r2,
+; CHECK:    addis r6, r2,
+; CHECK:    lfd f0, 0(r3)
+; CHECK:    lfd f1, 0(r4)
+; CHECK:    xxlxor v0, v0, v0
+; CHECK:    addi r3, r5, .LCPI1_0@toc@l
+; CHECK:    addi r4, r6, .LCPI1_1@toc@l
+; CHECK:    lxvx v2, 0, r3
+; CHECK:    lxvx v3, 0, r4
+; CHECK:    xxpermdi v4, f0, f0, 2
+; CHECK:    xxpermdi v5, f1, f1, 2
+; CHECK:    vperm v1, v4, v0, v2
+; CHECK:    vperm v4, v0, v4, v3
+; CHECK:    vperm v2, v5, v0, v2
+; CHECK:    vperm v3, v0, v5, v3
+; CHECK:    xvnegsp v5, v1
+; CHECK:    xvnegsp v4, v4
+; CHECK:    xvnegsp v2, v2
+; CHECK:    xvnegsp v3, v3
+; CHECK:    vabsduw v3, v4, v3
+; CHECK:    vabsduw v2, v5, v2
+; CHECK:    vadduwm v2, v2, v3
+; CHECK:    xxswapd v3, v2
+; CHECK:    vadduwm v2, v2, v3
+; CHECK:    xxspltw v3, v2, 2
+; CHECK:    vadduwm v2, v2, v3
+; CHECK:    vextuwrx r3, r3, v2
+; CHECK:    extsw r3, r3
+; CHECK:    blr
+
+; P9BE-LABEL: test_pre_inc_disable_2:
+; P9BE:    addis r5, r2,
+; P9BE:    addis r6, r2,
+; P9BE:    lfd f0, 0(r3)
+; P9BE:    lfd f1, 0(r4)
+; P9BE:    xxlxor v5, v5, v5
+; P9BE:    addi r3, r5,
+; P9BE:    addi r4, r6,
+; P9BE:    lxvx v2, 0, r3
+; P9BE:    lxvx v3, 0, r4
+; P9BE:    xxlor v4, vs0, vs0
+; P9BE:    xxlor v0, vs1, vs1
+; P9BE:    vperm v1, v5, v4, v2
+; P9BE:    vperm v4, v5, v4, v3
+; P9BE:    vperm v2, v5, v0, v2
+; P9BE:    vperm v3, v5, v0, v3
+; P9BE:    xvnegsp v5, v1
+; P9BE:    xvnegsp v4, v4
+; P9BE:    xvnegsp v2, v2
+; P9BE:    xvnegsp v3, v3
+; P9BE:    vabsduw v3, v4, v3
+; P9BE:    vabsduw v2, v5, v2
+; P9BE:    vadduwm v2, v2, v3
+; P9BE:    xxswapd v3, v2
+; P9BE:    vadduwm v2, v2, v3
+; P9BE:    xxspltw v3, v2, 1
+; P9BE:    vadduwm v2, v2, v3
+; P9BE:    vextuwlx r3, r3, v2
+; P9BE:    extsw r3, r3
+; P9BE:    blr
+entry:
+  %0 = bitcast i8* %pix1 to <8 x i8>*
+  %1 = load <8 x i8>, <8 x i8>* %0, align 1
+  %2 = zext <8 x i8> %1 to <8 x i32>
+  %3 = bitcast i8* %pix2 to <8 x i8>*
+  %4 = load <8 x i8>, <8 x i8>* %3, align 1
+  %5 = zext <8 x i8> %4 to <8 x i32>
+  %6 = sub nsw <8 x i32> %2, %5
+  %7 = icmp slt <8 x i32> %6, zeroinitializer
+  %8 = sub nsw <8 x i32> zeroinitializer, %6
+  %9 = select <8 x i1> %7, <8 x i32> %8, <8 x i32> %6
+  %rdx.shuf = shufflevector <8 x i32> %9, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx = add nsw <8 x i32> %9, %rdx.shuf
+  %rdx.shuf12 = shufflevector <8 x i32> %bin.rdx, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx13 = add nsw <8 x i32> %bin.rdx, %rdx.shuf12
+  %rdx.shuf14 = shufflevector <8 x i32> %bin.rdx13, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
+  %bin.rdx15 = add nsw <8 x i32> %bin.rdx13, %rdx.shuf14
+  %10 = extractelement <8 x i32> %bin.rdx15, i32 0
+  ret i32 %10
+}
+
+
+; Generated from C source:
+;
+;#include <stdint.h>
+;#include <stdlib.h>
+;int test_pre_inc_disable_1( uint8_t *pix1, int i_stride_pix1, uint8_t *pix2 ) {
+;    int i_sum = 0;
+;    for( int y = 0; y < 8; y++ ) {
+;        for( int x = 0; x < 8; x++) {
+;            i_sum += abs( pix1[x] - pix2[x] );
+;        }
+;        pix1 += i_stride_pix1;
+;    }
+;    return i_sum;
+;}
+
+;int test_pre_inc_disable_2( uint8_t *pix1, uint8_t *pix2 ) {
+;  int i_sum = 0;
+;  for( int x = 0; x < 8; x++ ) {
+;    i_sum += abs( pix1[x] - pix2[x] );
+;  }
+;
+;  return i_sum;
+;}
+



