[llvm] 5459d08 - [PowerPC] Fix single-use check and update chain users for ld-splat
Nemanja Ivanovic via llvm-commits
llvm-commits@lists.llvm.org
Tue Oct 27 14:50:03 PDT 2020
Author: Nemanja Ivanovic
Date: 2020-10-27T16:49:38-05:00
New Revision: 5459d08795e370321beb9e50c3b73e9fcd2dd7de
URL: https://github.com/llvm/llvm-project/commit/5459d08795e370321beb9e50c3b73e9fcd2dd7de
DIFF: https://github.com/llvm/llvm-project/commit/5459d08795e370321beb9e50c3b73e9fcd2dd7de.diff
LOG: [PowerPC] Fix single-use check and update chain users for ld-splat
When converting a BUILD_VECTOR or VECTOR_SHUFFLE to a splatting load
as of 1461fb6e783cb946b061f66689b419f74f7fad63, we inaccurately check
for a single user of the load and neglect to update the users of the
output chain of the original load. As a result, we can emit a new
load when the original load is kept and the new load can be reordered
after a dependent store. This patch fixes those two issues.
Fixes https://bugs.llvm.org/show_bug.cgi?id=47891
Added:
llvm/test/CodeGen/PowerPC/pr47891.ll
Modified:
llvm/lib/Target/PowerPC/PPCISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 662442f1643b..9bedcf716726 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -9207,7 +9207,12 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
// Checking for a single use of this load, we have to check for vector
// width (128 bits) / ElementSize uses (since each operand of the
// BUILD_VECTOR is a separate use of the value.
- if (InputLoad->getNode()->hasNUsesOfValue(128 / ElementSize, 0) &&
+ unsigned NumUsesOfInputLD = 128 / ElementSize;
+ for (SDValue BVInOp : Op->ops())
+ if (BVInOp.isUndef())
+ NumUsesOfInputLD--;
+ assert(NumUsesOfInputLD > 0 && "No uses of input LD of a build_vector?");
+ if (InputLoad->getNode()->hasNUsesOfValue(NumUsesOfInputLD, 0) &&
((Subtarget.hasVSX() && ElementSize == 64) ||
(Subtarget.hasP9Vector() && ElementSize == 32))) {
SDValue Ops[] = {
@@ -9215,10 +9220,14 @@ SDValue PPCTargetLowering::LowerBUILD_VECTOR(SDValue Op,
LD->getBasePtr(), // Ptr
DAG.getValueType(Op.getValueType()) // VT
};
- return
- DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl,
- DAG.getVTList(Op.getValueType(), MVT::Other),
- Ops, LD->getMemoryVT(), LD->getMemOperand());
+ SDValue LdSplt = DAG.getMemIntrinsicNode(
+ PPCISD::LD_SPLAT, dl, DAG.getVTList(Op.getValueType(), MVT::Other),
+ Ops, LD->getMemoryVT(), LD->getMemOperand());
+ // Replace all uses of the output chain of the original load with the
+ // output chain of the new load.
+ DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1),
+ LdSplt.getValue(1));
+ return LdSplt;
}
}
@@ -9860,6 +9869,7 @@ SDValue PPCTargetLowering::LowerVECTOR_SHUFFLE(SDValue Op,
SDValue LdSplt =
DAG.getMemIntrinsicNode(PPCISD::LD_SPLAT, dl, VTL,
Ops, LD->getMemoryVT(), LD->getMemOperand());
+ DAG.ReplaceAllUsesOfValueWith(InputLoad->getValue(1), LdSplt.getValue(1));
if (LdSplt.getValueType() != SVOp->getValueType(0))
LdSplt = DAG.getBitcast(SVOp->getValueType(0), LdSplt);
return LdSplt;
diff --git a/llvm/test/CodeGen/PowerPC/pr47891.ll b/llvm/test/CodeGen/PowerPC/pr47891.ll
new file mode 100644
index 000000000000..2c53769e069d
--- /dev/null
+++ b/llvm/test/CodeGen/PowerPC/pr47891.ll
@@ -0,0 +1,116 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr \
+; RUN: -mtriple=powerpc64le-unknown-unknown < %s | FileCheck %s
+%struct.poly2 = type { [11 x i64] }
+
+; Function Attrs: nofree norecurse nounwind
+define dso_local void @poly2_lshift1(%struct.poly2* nocapture %p) local_unnamed_addr #0 {
+; CHECK-LABEL: poly2_lshift1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: li r4, 72
+; CHECK-NEXT: addis r5, r2, .LCPI0_0@toc@ha
+; CHECK-NEXT: addis r6, r2, .LCPI0_1@toc@ha
+; CHECK-NEXT: ld r7, 64(r3)
+; CHECK-NEXT: ld r8, 16(r3)
+; CHECK-NEXT: ld r10, 24(r3)
+; CHECK-NEXT: ld r11, 32(r3)
+; CHECK-NEXT: lxvd2x vs0, r3, r4
+; CHECK-NEXT: addi r5, r5, .LCPI0_0@toc@l
+; CHECK-NEXT: addi r6, r6, .LCPI0_1@toc@l
+; CHECK-NEXT: ld r12, 56(r3)
+; CHECK-NEXT: lxvd2x vs1, 0, r5
+; CHECK-NEXT: mtfprd f2, r7
+; CHECK-NEXT: ld r5, 0(r3)
+; CHECK-NEXT: xxswapd v2, vs0
+; CHECK-NEXT: lxvd2x vs0, 0, r6
+; CHECK-NEXT: ld r6, 8(r3)
+; CHECK-NEXT: rotldi r9, r5, 1
+; CHECK-NEXT: sldi r5, r5, 1
+; CHECK-NEXT: xxswapd v3, vs1
+; CHECK-NEXT: std r5, 0(r3)
+; CHECK-NEXT: rotldi r5, r10, 1
+; CHECK-NEXT: rldimi r9, r6, 1, 0
+; CHECK-NEXT: rotldi r6, r6, 1
+; CHECK-NEXT: xxpermdi v4, v2, vs2, 2
+; CHECK-NEXT: xxswapd v5, vs0
+; CHECK-NEXT: rldimi r6, r8, 1, 0
+; CHECK-NEXT: rotldi r8, r8, 1
+; CHECK-NEXT: std r9, 8(r3)
+; CHECK-NEXT: ld r9, 40(r3)
+; CHECK-NEXT: rldimi r8, r10, 1, 0
+; CHECK-NEXT: rldimi r5, r11, 1, 0
+; CHECK-NEXT: std r6, 16(r3)
+; CHECK-NEXT: rotldi r10, r11, 1
+; CHECK-NEXT: ld r11, 48(r3)
+; CHECK-NEXT: std r5, 32(r3)
+; CHECK-NEXT: rotldi r6, r12, 1
+; CHECK-NEXT: vsrd v3, v4, v3
+; CHECK-NEXT: rldimi r10, r9, 1, 0
+; CHECK-NEXT: rotldi r9, r9, 1
+; CHECK-NEXT: std r8, 24(r3)
+; CHECK-NEXT: vsld v2, v2, v5
+; CHECK-NEXT: rotldi r5, r11, 1
+; CHECK-NEXT: rldimi r9, r11, 1, 0
+; CHECK-NEXT: std r10, 40(r3)
+; CHECK-NEXT: rldimi r5, r12, 1, 0
+; CHECK-NEXT: rldimi r6, r7, 1, 0
+; CHECK-NEXT: std r9, 48(r3)
+; CHECK-NEXT: xxlor vs0, v2, v3
+; CHECK-NEXT: std r5, 56(r3)
+; CHECK-NEXT: std r6, 64(r3)
+; CHECK-NEXT: xxswapd vs0, vs0
+; CHECK-NEXT: stxvd2x vs0, r3, r4
+; CHECK-NEXT: blr
+entry:
+ %arrayidx = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 0
+ %0 = load i64, i64* %arrayidx, align 8
+ %shl = shl i64 %0, 1
+ store i64 %shl, i64* %arrayidx, align 8
+ %arrayidx.1 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 1
+ %1 = load i64, i64* %arrayidx.1, align 8
+ %or.1 = call i64 @llvm.fshl.i64(i64 %1, i64 %0, i64 1)
+ store i64 %or.1, i64* %arrayidx.1, align 8
+ %arrayidx.2 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 2
+ %2 = load i64, i64* %arrayidx.2, align 8
+ %or.2 = call i64 @llvm.fshl.i64(i64 %2, i64 %1, i64 1)
+ store i64 %or.2, i64* %arrayidx.2, align 8
+ %arrayidx.3 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 3
+ %3 = load i64, i64* %arrayidx.3, align 8
+ %or.3 = call i64 @llvm.fshl.i64(i64 %3, i64 %2, i64 1)
+ store i64 %or.3, i64* %arrayidx.3, align 8
+ %arrayidx.4 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 4
+ %4 = load i64, i64* %arrayidx.4, align 8
+ %or.4 = call i64 @llvm.fshl.i64(i64 %4, i64 %3, i64 1)
+ store i64 %or.4, i64* %arrayidx.4, align 8
+ %arrayidx.5 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 5
+ %5 = load i64, i64* %arrayidx.5, align 8
+ %or.5 = call i64 @llvm.fshl.i64(i64 %5, i64 %4, i64 1)
+ store i64 %or.5, i64* %arrayidx.5, align 8
+ %arrayidx.6 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 6
+ %6 = load i64, i64* %arrayidx.6, align 8
+ %or.6 = call i64 @llvm.fshl.i64(i64 %6, i64 %5, i64 1)
+ store i64 %or.6, i64* %arrayidx.6, align 8
+ %arrayidx.7 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 7
+ %7 = load i64, i64* %arrayidx.7, align 8
+ %or.7 = call i64 @llvm.fshl.i64(i64 %7, i64 %6, i64 1)
+ store i64 %or.7, i64* %arrayidx.7, align 8
+ %arrayidx.8 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 8
+ %8 = load i64, i64* %arrayidx.8, align 8
+ %or.8 = call i64 @llvm.fshl.i64(i64 %8, i64 %7, i64 1)
+ store i64 %or.8, i64* %arrayidx.8, align 8
+ %arrayidx.9 = getelementptr inbounds %struct.poly2, %struct.poly2* %p, i64 0, i32 0, i64 9
+ %9 = bitcast i64* %arrayidx.9 to <2 x i64>*
+ %10 = load <2 x i64>, <2 x i64>* %9, align 8
+ %11 = insertelement <2 x i64> undef, i64 %8, i32 0
+ %12 = shufflevector <2 x i64> %11, <2 x i64> %10, <2 x i32> <i32 0, i32 2>
+ %13 = call <2 x i64> @llvm.fshl.v2i64(<2 x i64> %10, <2 x i64> %12, <2 x i64> <i64 1, i64 1>)
+ %14 = bitcast i64* %arrayidx.9 to <2 x i64>*
+ store <2 x i64> %13, <2 x i64>* %14, align 8
+ ret void
+}
+
+; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
+declare i64 @llvm.fshl.i64(i64, i64, i64) #1
+
+; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
+declare <2 x i64> @llvm.fshl.v2i64(<2 x i64>, <2 x i64>, <2 x i64>) #1
More information about the llvm-commits
mailing list