[llvm] [RISCV] Add Tune to DontSinkSplatOperands (PR #79199)
Michael Maitland via llvm-commits
llvm-commits at lists.llvm.org
Thu Jan 25 09:43:28 PST 2024
https://github.com/michaelmaitland updated https://github.com/llvm/llvm-project/pull/79199
From 99aad5a4939b5d925b0a214d114bc627ff754a0f Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Tue, 23 Jan 2024 11:59:49 -0800
Subject: [PATCH 1/5] [RISCV] Add Tune to DontSinkSplatOperands
A CPU may prefer not to sink splat operands, one reason being that the
resulting .vx/.vf instructions could require an S2V (scalar-to-vector)
transfer buffer to move scalars into vectors.
This is a precommit for https://github.com/llvm/llvm-project/pull/79015.
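For background, the "sinking" in question is the CodeGenPrepare transform that
copies a loop-invariant splat from the preheader into the loop body so that
instruction selection can fold it into a scalar-operand form such as vadd.vx.
A minimal IR sketch of the idea, modeled on the @sink_splat_add test below
(illustrative only, not part of the patch; value and block names are
hypothetical):

  ; Sketch, not from the patch.
  ; Before sinking: the splat lives in the preheader, so inside the loop
  ; ISel sees a vector-vector add and emits a hoisted vmv.v.x plus vadd.vv.
  entry:
    %ins = insertelement <4 x i32> poison, i32 %x, i32 0
    %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
    br label %vector.body

  ; After sinking: the insertelement/shufflevector pair is duplicated into the
  ; loop block, so ISel can match add(load, splat(%x)) and emit vadd.vx,
  ; reading %x from a scalar register on every iteration.
  vector.body:
    %ins.sunk = insertelement <4 x i32> poison, i32 %x, i32 0
    %splat.sunk = shufflevector <4 x i32> %ins.sunk, <4 x i32> poison, <4 x i32> zeroinitializer
    %v = load <4 x i32>, <4 x i32>* %p, align 4
    %sum = add <4 x i32> %v, %splat.sunk

With the tune feature set, shouldSinkOperands() returns false, the splat stays
hoisted, and the loop keeps the vadd.vv form shown in the tests below.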
---
llvm/lib/Target/RISCV/RISCVFeatures.td | 7 +
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 8 +
.../RISCV/rvv/dont-sink-splat-operands.ll | 353 ++++++++++++++++++
3 files changed, 368 insertions(+)
create mode 100644 llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index cbb096ba20ae67b..d9522eb5e2dbc87 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1082,6 +1082,13 @@ def TuneShortForwardBranchOpt
def HasShortForwardBranchOpt : Predicate<"Subtarget->hasShortForwardBranchOpt()">;
def NoShortForwardBranchOpt : Predicate<"!Subtarget->hasShortForwardBranchOpt()">;
+// Some subtargets require a S2V transfer buffer to move scalars into vectors.
+// FIXME: Forming .vx/.vf can reduce register pressure.
+def TuneDontSinkSplatOperands
+ : SubtargetFeature<"dont-sink-splat-operands", "DontSinkSplatOperands",
+ "true", "Don't sink splat operands to enable .vx or .vf "
+ "instructions">;
+
def TuneConditionalCompressedMoveFusion
: SubtargetFeature<"conditional-cmv-fusion", "HasConditionalCompressedMoveFusion",
"true", "Enable branch+c.mv fusion">;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b41e2f40dc72f01..6737f1c16238909 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2000,6 +2000,14 @@ bool RISCVTargetLowering::shouldSinkOperands(
if (!I->getType()->isVectorTy() || !Subtarget.hasVInstructions())
return false;
+ // Don't sink splat operands if the target prefers not to. Some targets require
+ // S2V transfer buffers, and we can run out of them copying the same value
+ // repeatedly.
+ // FIXME: It could still be worth doing if it would improve vector register
+ // pressure and prevent a vector spill.
+ if (Subtarget.dontSinkSplatOperands())
+ return false;
+
for (auto OpIdx : enumerate(I->operands())) {
if (!canSplatOperand(I, OpIdx.index()))
continue;
diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
new file mode 100644
index 000000000000000..38c1ee6a9c71a5e
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
@@ -0,0 +1,353 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
+; RUN: -mattr=+dont-sink-splat-operands -riscv-v-vector-bits-min=128 | FileCheck %s
+
+; Test that we don't sink splat operands when compiling with dont-sink-splat-operands.
+; Each scalar register access requires a S2V transfer buffer entry. Using too many
+; limits performance.
+; FIXME: This is potentially bad for register pressure. Need a better heuristic.
+
+define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_add:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: .LBB0_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vadd.vv v9, v9, v8
+; CHECK-NEXT: vse32.v v9, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a1, .LBB0_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, i32* %a, i64 %index
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+ %2 = add <4 x i32> %wide.load, %broadcast.splat
+ %3 = bitcast i32* %0 to <4 x i32>*
+ store <4 x i32> %2, <4 x i32>* %3, align 4
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+declare i64 @llvm.vscale.i64()
+
+define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
+; CHECK-LABEL: sink_splat_add_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: srli a2, a5, 1
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB1_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB1_5
+; CHECK-NEXT: .LBB1_2: # %vector.ph
+; CHECK-NEXT: addi a3, a2, -1
+; CHECK-NEXT: andi a4, a3, 1024
+; CHECK-NEXT: xori a3, a4, 1024
+; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: slli a5, a5, 1
+; CHECK-NEXT: mv a6, a0
+; CHECK-NEXT: mv a7, a3
+; CHECK-NEXT: .LBB1_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl2re32.v v10, (a6)
+; CHECK-NEXT: vadd.vv v10, v10, v8
+; CHECK-NEXT: vs2r.v v10, (a6)
+; CHECK-NEXT: sub a7, a7, a2
+; CHECK-NEXT: add a6, a6, a5
+; CHECK-NEXT: bnez a7, .LBB1_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB1_7
+; CHECK-NEXT: .LBB1_5: # %for.body.preheader
+; CHECK-NEXT: slli a2, a3, 2
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: lui a3, 1
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: .LBB1_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: lw a3, 0(a2)
+; CHECK-NEXT: add a3, a3, a1
+; CHECK-NEXT: sw a3, 0(a2)
+; CHECK-NEXT: addi a2, a2, 4
+; CHECK-NEXT: bne a2, a0, .LBB1_6
+; CHECK-NEXT: .LBB1_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %0 = call i64 @llvm.vscale.i64()
+ %1 = shl i64 %0, 2
+ %min.iters.check = icmp ugt i64 %1, 1024
+ br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+ %2 = call i64 @llvm.vscale.i64()
+ %3 = shl i64 %2, 2
+ %n.mod.vf = urem i64 1024, %3
+ %n.vec = sub nsw i64 1024, %n.mod.vf
+ %broadcast.splatinsert = insertelement <vscale x 4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <vscale x 4 x i32> %broadcast.splatinsert, <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
+ %4 = call i64 @llvm.vscale.i64()
+ %5 = shl i64 %4, 2
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %6 = getelementptr inbounds i32, i32* %a, i64 %index
+ %7 = bitcast i32* %6 to <vscale x 4 x i32>*
+ %wide.load = load <vscale x 4 x i32>, <vscale x 4 x i32>* %7, align 4
+ %8 = add <vscale x 4 x i32> %wide.load, %broadcast.splat
+ %9 = bitcast i32* %6 to <vscale x 4 x i32>*
+ store <vscale x 4 x i32> %8, <vscale x 4 x i32>* %9, align 4
+ %index.next = add nuw i64 %index, %5
+ %10 = icmp eq i64 %index.next, %n.vec
+ br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+ %cmp.n = icmp eq i64 %n.mod.vf, 0
+ br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+ %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds i32, i32* %a, i64 %indvars.iv
+ %11 = load i32, i32* %arrayidx, align 4
+ %add = add i32 %11, %x
+ store i32 %add, i32* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
+
+define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_add:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.x v8, a1
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: .LBB2_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; CHECK-NEXT: vadd.vv v9, v9, v8, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v9, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a1, .LBB2_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
+ %broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds i32, i32* %a, i64 %index
+ %1 = bitcast i32* %0 to <4 x i32>*
+ %wide.load = load <4 x i32>, <4 x i32>* %1, align 4
+ %2 = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %wide.load, <4 x i32> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ %3 = bitcast i32* %0 to <4 x i32>*
+ store <4 x i32> %2, <4 x i32>* %3, align 4
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_fadd(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fadd:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: lui a1, 1
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: .LBB3_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vfadd.vv v9, v9, v8
+; CHECK-NEXT: vse32.v v9, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a1, .LBB3_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds float, float* %a, i64 %index
+ %1 = bitcast float* %0 to <4 x float>*
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
+ %2 = fadd <4 x float> %wide.load, %broadcast.splat
+ %3 = bitcast float* %0 to <4 x float>*
+ store <4 x float> %2, <4 x float>* %3, align 4
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
+
+define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
+; CHECK-LABEL: sink_splat_fadd_scalable:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB4_2
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: j .LBB4_5
+; CHECK-NEXT: .LBB4_2: # %vector.ph
+; CHECK-NEXT: addi a3, a2, -1
+; CHECK-NEXT: andi a4, a3, 1024
+; CHECK-NEXT: xori a3, a4, 1024
+; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: mv a5, a0
+; CHECK-NEXT: mv a6, a3
+; CHECK-NEXT: .LBB4_3: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vl1re32.v v9, (a5)
+; CHECK-NEXT: vfadd.vv v9, v9, v8
+; CHECK-NEXT: vs1r.v v9, (a5)
+; CHECK-NEXT: sub a6, a6, a2
+; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: bnez a6, .LBB4_3
+; CHECK-NEXT: # %bb.4: # %middle.block
+; CHECK-NEXT: beqz a4, .LBB4_7
+; CHECK-NEXT: .LBB4_5: # %for.body.preheader
+; CHECK-NEXT: slli a1, a3, 2
+; CHECK-NEXT: add a1, a0, a1
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a0, a0, a2
+; CHECK-NEXT: .LBB4_6: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: flw fa5, 0(a1)
+; CHECK-NEXT: fadd.s fa5, fa5, fa0
+; CHECK-NEXT: fsw fa5, 0(a1)
+; CHECK-NEXT: addi a1, a1, 4
+; CHECK-NEXT: bne a1, a0, .LBB4_6
+; CHECK-NEXT: .LBB4_7: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %0 = call i64 @llvm.vscale.i64()
+ %1 = shl i64 %0, 1
+ %min.iters.check = icmp ugt i64 %1, 1024
+ br i1 %min.iters.check, label %for.body.preheader, label %vector.ph
+
+vector.ph: ; preds = %entry
+ %2 = call i64 @llvm.vscale.i64()
+ %3 = shl i64 %2, 1
+ %n.mod.vf = urem i64 1024, %3
+ %n.vec = sub nsw i64 1024, %n.mod.vf
+ %broadcast.splatinsert = insertelement <vscale x 2 x float> poison, float %x, i32 0
+ %broadcast.splat = shufflevector <vscale x 2 x float> %broadcast.splatinsert, <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer
+ %4 = call i64 @llvm.vscale.i64()
+ %5 = shl i64 %4, 1
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %vector.ph
+ %index = phi i64 [ 0, %vector.ph ], [ %index.next, %vector.body ]
+ %6 = getelementptr inbounds float, float* %a, i64 %index
+ %7 = bitcast float* %6 to <vscale x 2 x float>*
+ %wide.load = load <vscale x 2 x float>, <vscale x 2 x float>* %7, align 4
+ %8 = fadd <vscale x 2 x float> %wide.load, %broadcast.splat
+ %9 = bitcast float* %6 to <vscale x 2 x float>*
+ store <vscale x 2 x float> %8, <vscale x 2 x float>* %9, align 4
+ %index.next = add nuw i64 %index, %5
+ %10 = icmp eq i64 %index.next, %n.vec
+ br i1 %10, label %middle.block, label %vector.body
+
+middle.block: ; preds = %vector.body
+ %cmp.n = icmp eq i64 %n.mod.vf, 0
+ br i1 %cmp.n, label %for.cond.cleanup, label %for.body.preheader
+
+for.body.preheader: ; preds = %entry, %middle.block
+ %indvars.iv.ph = phi i64 [ 0, %entry ], [ %n.vec, %middle.block ]
+ br label %for.body
+
+for.cond.cleanup: ; preds = %for.body, %middle.block
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ %indvars.iv.next, %for.body ], [ %indvars.iv.ph, %for.body.preheader ]
+ %arrayidx = getelementptr inbounds float, float* %a, i64 %indvars.iv
+ %11 = load float, float* %arrayidx, align 4
+ %mul = fadd float %11, %x
+ store float %mul, float* %arrayidx, align 4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %cmp.not = icmp eq i64 %indvars.iv.next, 1024
+ br i1 %cmp.not, label %for.cond.cleanup, label %for.body
+}
+
+declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32)
+
+define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: sink_splat_vp_fadd:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vfmv.v.f v8, fa0
+; CHECK-NEXT: lui a2, 1
+; CHECK-NEXT: add a2, a0, a2
+; CHECK-NEXT: .LBB5_1: # %vector.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: vle32.v v9, (a0)
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vfadd.vv v9, v9, v8, v0.t
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT: vse32.v v9, (a0)
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: bne a0, a2, .LBB5_1
+; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
+; CHECK-NEXT: ret
+entry:
+ %broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
+ %broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
+ br label %vector.body
+
+vector.body: ; preds = %vector.body, %entry
+ %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
+ %0 = getelementptr inbounds float, float* %a, i64 %index
+ %1 = bitcast float* %0 to <4 x float>*
+ %wide.load = load <4 x float>, <4 x float>* %1, align 4
+ %2 = call <4 x float> @llvm.vp.fadd.v4i32(<4 x float> %wide.load, <4 x float> %broadcast.splat, <4 x i1> %m, i32 %vl)
+ %3 = bitcast float* %0 to <4 x float>*
+ store <4 x float> %2, <4 x float>* %3, align 4
+ %index.next = add nuw i64 %index, 4
+ %4 = icmp eq i64 %index.next, 1024
+ br i1 %4, label %for.cond.cleanup, label %vector.body
+
+for.cond.cleanup: ; preds = %vector.body
+ ret void
+}
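As an aside (not part of the patch): -mattr features are ordinary subtarget
features, so the same behavior can also be requested per function through the
"target-features" string attribute, since shouldSinkOperands() consults the
per-function subtarget. A hypothetical sketch using the feature name from this
revision (it is renamed to no-sink-splat-operands in a later revision below):

  ; Hypothetical example: only this function opts out of splat-operand
  ; sinking; the rest of the module keeps the default behavior.
  define void @no_sink_here(i32* %a, i32 signext %x) #0 {
  entry:
    ret void
  }

  attributes #0 = { "target-features"="+v,+dont-sink-splat-operands" }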
From d0cab7b18959138c7f083f24c24bf595b7940c49 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 24 Jan 2024 11:06:26 -0800
Subject: [PATCH 2/5] !fixup update description to include .wx and .wf
---
llvm/lib/Target/RISCV/RISCVFeatures.td | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index d9522eb5e2dbc87..0b10fb9b416e44e 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1083,11 +1083,11 @@ def HasShortForwardBranchOpt : Predicate<"Subtarget->hasShortForwardBranchOpt()"
def NoShortForwardBranchOpt : Predicate<"!Subtarget->hasShortForwardBranchOpt()">;
// Some subtargets require a S2V transfer buffer to move scalars into vectors.
-// FIXME: Forming .vx/.vf can reduce register pressure.
+// FIXME: Forming .vx/.vf/.wx/.wf can reduce register pressure.
def TuneDontSinkSplatOperands
: SubtargetFeature<"dont-sink-splat-operands", "DontSinkSplatOperands",
- "true", "Don't sink splat operands to enable .vx or .vf "
- "instructions">;
+ "true", "Don't sink splat operands to enable .vx, .vf,"
+ ".wx, and .wf instructions">;
def TuneConditionalCompressedMoveFusion
: SubtargetFeature<"conditional-cmv-fusion", "HasConditionalCompressedMoveFusion",
From 8115ab4b9f9d0d19fac22d4d5f81a07beb2514e9 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Wed, 24 Jan 2024 11:13:23 -0800
Subject: [PATCH 3/5] !fixup add RUN with sinking
---
.../RISCV/rvv/dont-sink-splat-operands.ll | 447 ++++++++++++------
1 file changed, 299 insertions(+), 148 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
index 38c1ee6a9c71a5e..aeba64cade0e18f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
@@ -1,6 +1,10 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
-; RUN: -mattr=+dont-sink-splat-operands -riscv-v-vector-bits-min=128 | FileCheck %s
+; RUN: -mattr=+dont-sink-splat-operands -riscv-v-vector-bits-min=128 \
+; RUN: | FileCheck -check-prefix=DONT-SINK %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
+; RUN: -mattr=-dont-sink-splat-operands -riscv-v-vector-bits-min=128 \
+; RUN: | FileCheck -check-prefix=SINK %s
; Test that we don't sink splat operands when compiling with dont-sink-splat-operands.
; Each scalar register access requires a S2V transfer buffer entry. Using too many
@@ -8,21 +12,36 @@
; FIXME: This is potentially bad for register pressure. Need a better heuristic.
define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
-; CHECK-LABEL: sink_splat_add:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: lui a1, 1
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: .LBB0_1: # %vector.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vadd.vv v9, v9, v8
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a1, .LBB0_1
-; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
-; CHECK-NEXT: ret
+; DONT-SINK-LABEL: sink_splat_add:
+; DONT-SINK: # %bb.0: # %entry
+; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DONT-SINK-NEXT: vmv.v.x v8, a1
+; DONT-SINK-NEXT: lui a1, 1
+; DONT-SINK-NEXT: add a1, a0, a1
+; DONT-SINK-NEXT: .LBB0_1: # %vector.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: vle32.v v9, (a0)
+; DONT-SINK-NEXT: vadd.vv v9, v9, v8
+; DONT-SINK-NEXT: vse32.v v9, (a0)
+; DONT-SINK-NEXT: addi a0, a0, 16
+; DONT-SINK-NEXT: bne a0, a1, .LBB0_1
+; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; DONT-SINK-NEXT: ret
+;
+; SINK-LABEL: sink_splat_add:
+; SINK: # %bb.0: # %entry
+; SINK-NEXT: lui a2, 1
+; SINK-NEXT: add a2, a0, a2
+; SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; SINK-NEXT: .LBB0_1: # %vector.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: vle32.v v8, (a0)
+; SINK-NEXT: vadd.vx v8, v8, a1
+; SINK-NEXT: vse32.v v8, (a0)
+; SINK-NEXT: addi a0, a0, 16
+; SINK-NEXT: bne a0, a2, .LBB0_1
+; SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; SINK-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
@@ -47,48 +66,90 @@ for.cond.cleanup: ; preds = %vector.body
declare i64 @llvm.vscale.i64()
define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
-; CHECK-LABEL: sink_splat_add_scalable:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: srli a2, a5, 1
-; CHECK-NEXT: li a3, 1024
-; CHECK-NEXT: bgeu a3, a2, .LBB1_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: j .LBB1_5
-; CHECK-NEXT: .LBB1_2: # %vector.ph
-; CHECK-NEXT: addi a3, a2, -1
-; CHECK-NEXT: andi a4, a3, 1024
-; CHECK-NEXT: xori a3, a4, 1024
-; CHECK-NEXT: vsetvli a6, zero, e32, m2, ta, ma
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: slli a5, a5, 1
-; CHECK-NEXT: mv a6, a0
-; CHECK-NEXT: mv a7, a3
-; CHECK-NEXT: .LBB1_3: # %vector.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vl2re32.v v10, (a6)
-; CHECK-NEXT: vadd.vv v10, v10, v8
-; CHECK-NEXT: vs2r.v v10, (a6)
-; CHECK-NEXT: sub a7, a7, a2
-; CHECK-NEXT: add a6, a6, a5
-; CHECK-NEXT: bnez a7, .LBB1_3
-; CHECK-NEXT: # %bb.4: # %middle.block
-; CHECK-NEXT: beqz a4, .LBB1_7
-; CHECK-NEXT: .LBB1_5: # %for.body.preheader
-; CHECK-NEXT: slli a2, a3, 2
-; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: lui a3, 1
-; CHECK-NEXT: add a0, a0, a3
-; CHECK-NEXT: .LBB1_6: # %for.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: lw a3, 0(a2)
-; CHECK-NEXT: add a3, a3, a1
-; CHECK-NEXT: sw a3, 0(a2)
-; CHECK-NEXT: addi a2, a2, 4
-; CHECK-NEXT: bne a2, a0, .LBB1_6
-; CHECK-NEXT: .LBB1_7: # %for.cond.cleanup
-; CHECK-NEXT: ret
+; DONT-SINK-LABEL: sink_splat_add_scalable:
+; DONT-SINK: # %bb.0: # %entry
+; DONT-SINK-NEXT: csrr a5, vlenb
+; DONT-SINK-NEXT: srli a2, a5, 1
+; DONT-SINK-NEXT: li a3, 1024
+; DONT-SINK-NEXT: bgeu a3, a2, .LBB1_2
+; DONT-SINK-NEXT: # %bb.1:
+; DONT-SINK-NEXT: li a3, 0
+; DONT-SINK-NEXT: j .LBB1_5
+; DONT-SINK-NEXT: .LBB1_2: # %vector.ph
+; DONT-SINK-NEXT: addi a3, a2, -1
+; DONT-SINK-NEXT: andi a4, a3, 1024
+; DONT-SINK-NEXT: xori a3, a4, 1024
+; DONT-SINK-NEXT: vsetvli a6, zero, e32, m2, ta, ma
+; DONT-SINK-NEXT: vmv.v.x v8, a1
+; DONT-SINK-NEXT: slli a5, a5, 1
+; DONT-SINK-NEXT: mv a6, a0
+; DONT-SINK-NEXT: mv a7, a3
+; DONT-SINK-NEXT: .LBB1_3: # %vector.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: vl2re32.v v10, (a6)
+; DONT-SINK-NEXT: vadd.vv v10, v10, v8
+; DONT-SINK-NEXT: vs2r.v v10, (a6)
+; DONT-SINK-NEXT: sub a7, a7, a2
+; DONT-SINK-NEXT: add a6, a6, a5
+; DONT-SINK-NEXT: bnez a7, .LBB1_3
+; DONT-SINK-NEXT: # %bb.4: # %middle.block
+; DONT-SINK-NEXT: beqz a4, .LBB1_7
+; DONT-SINK-NEXT: .LBB1_5: # %for.body.preheader
+; DONT-SINK-NEXT: slli a2, a3, 2
+; DONT-SINK-NEXT: add a2, a0, a2
+; DONT-SINK-NEXT: lui a3, 1
+; DONT-SINK-NEXT: add a0, a0, a3
+; DONT-SINK-NEXT: .LBB1_6: # %for.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: lw a3, 0(a2)
+; DONT-SINK-NEXT: add a3, a3, a1
+; DONT-SINK-NEXT: sw a3, 0(a2)
+; DONT-SINK-NEXT: addi a2, a2, 4
+; DONT-SINK-NEXT: bne a2, a0, .LBB1_6
+; DONT-SINK-NEXT: .LBB1_7: # %for.cond.cleanup
+; DONT-SINK-NEXT: ret
+;
+; SINK-LABEL: sink_splat_add_scalable:
+; SINK: # %bb.0: # %entry
+; SINK-NEXT: csrr a5, vlenb
+; SINK-NEXT: srli a2, a5, 1
+; SINK-NEXT: li a3, 1024
+; SINK-NEXT: bgeu a3, a2, .LBB1_2
+; SINK-NEXT: # %bb.1:
+; SINK-NEXT: li a3, 0
+; SINK-NEXT: j .LBB1_5
+; SINK-NEXT: .LBB1_2: # %vector.ph
+; SINK-NEXT: addi a3, a2, -1
+; SINK-NEXT: andi a4, a3, 1024
+; SINK-NEXT: xori a3, a4, 1024
+; SINK-NEXT: slli a5, a5, 1
+; SINK-NEXT: vsetvli a6, zero, e32, m2, ta, ma
+; SINK-NEXT: mv a6, a0
+; SINK-NEXT: mv a7, a3
+; SINK-NEXT: .LBB1_3: # %vector.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: vl2re32.v v8, (a6)
+; SINK-NEXT: vadd.vx v8, v8, a1
+; SINK-NEXT: vs2r.v v8, (a6)
+; SINK-NEXT: sub a7, a7, a2
+; SINK-NEXT: add a6, a6, a5
+; SINK-NEXT: bnez a7, .LBB1_3
+; SINK-NEXT: # %bb.4: # %middle.block
+; SINK-NEXT: beqz a4, .LBB1_7
+; SINK-NEXT: .LBB1_5: # %for.body.preheader
+; SINK-NEXT: slli a2, a3, 2
+; SINK-NEXT: add a2, a0, a2
+; SINK-NEXT: lui a3, 1
+; SINK-NEXT: add a0, a0, a3
+; SINK-NEXT: .LBB1_6: # %for.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: lw a3, 0(a2)
+; SINK-NEXT: add a3, a3, a1
+; SINK-NEXT: sw a3, 0(a2)
+; SINK-NEXT: addi a2, a2, 4
+; SINK-NEXT: bne a2, a0, .LBB1_6
+; SINK-NEXT: .LBB1_7: # %for.cond.cleanup
+; SINK-NEXT: ret
entry:
%0 = call i64 @llvm.vscale.i64()
%1 = shl i64 %0, 2
@@ -143,23 +204,40 @@ for.body: ; preds = %for.body.preheader,
declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: sink_splat_vp_add:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: lui a1, 1
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: .LBB2_1: # %vector.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
-; CHECK-NEXT: vadd.vv v9, v9, v8, v0.t
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a1, .LBB2_1
-; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
-; CHECK-NEXT: ret
+; DONT-SINK-LABEL: sink_splat_vp_add:
+; DONT-SINK: # %bb.0: # %entry
+; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DONT-SINK-NEXT: vmv.v.x v8, a1
+; DONT-SINK-NEXT: lui a1, 1
+; DONT-SINK-NEXT: add a1, a0, a1
+; DONT-SINK-NEXT: .LBB2_1: # %vector.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: vle32.v v9, (a0)
+; DONT-SINK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; DONT-SINK-NEXT: vadd.vv v9, v9, v8, v0.t
+; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DONT-SINK-NEXT: vse32.v v9, (a0)
+; DONT-SINK-NEXT: addi a0, a0, 16
+; DONT-SINK-NEXT: bne a0, a1, .LBB2_1
+; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; DONT-SINK-NEXT: ret
+;
+; SINK-LABEL: sink_splat_vp_add:
+; SINK: # %bb.0: # %entry
+; SINK-NEXT: lui a3, 1
+; SINK-NEXT: add a3, a0, a3
+; SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; SINK-NEXT: .LBB2_1: # %vector.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: vle32.v v8, (a0)
+; SINK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; SINK-NEXT: vadd.vx v8, v8, a1, v0.t
+; SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; SINK-NEXT: vse32.v v8, (a0)
+; SINK-NEXT: addi a0, a0, 16
+; SINK-NEXT: bne a0, a3, .LBB2_1
+; SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; SINK-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
@@ -182,21 +260,36 @@ for.cond.cleanup: ; preds = %vector.body
}
define void @sink_splat_fadd(float* nocapture %a, float %x) {
-; CHECK-LABEL: sink_splat_fadd:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.v.f v8, fa0
-; CHECK-NEXT: lui a1, 1
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: .LBB3_1: # %vector.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vfadd.vv v9, v9, v8
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a1, .LBB3_1
-; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
-; CHECK-NEXT: ret
+; DONT-SINK-LABEL: sink_splat_fadd:
+; DONT-SINK: # %bb.0: # %entry
+; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DONT-SINK-NEXT: vfmv.v.f v8, fa0
+; DONT-SINK-NEXT: lui a1, 1
+; DONT-SINK-NEXT: add a1, a0, a1
+; DONT-SINK-NEXT: .LBB3_1: # %vector.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: vle32.v v9, (a0)
+; DONT-SINK-NEXT: vfadd.vv v9, v9, v8
+; DONT-SINK-NEXT: vse32.v v9, (a0)
+; DONT-SINK-NEXT: addi a0, a0, 16
+; DONT-SINK-NEXT: bne a0, a1, .LBB3_1
+; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; DONT-SINK-NEXT: ret
+;
+; SINK-LABEL: sink_splat_fadd:
+; SINK: # %bb.0: # %entry
+; SINK-NEXT: lui a1, 1
+; SINK-NEXT: add a1, a0, a1
+; SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; SINK-NEXT: .LBB3_1: # %vector.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: vle32.v v8, (a0)
+; SINK-NEXT: vfadd.vf v8, v8, fa0
+; SINK-NEXT: vse32.v v8, (a0)
+; SINK-NEXT: addi a0, a0, 16
+; SINK-NEXT: bne a0, a1, .LBB3_1
+; SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; SINK-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
%broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
@@ -219,47 +312,88 @@ for.cond.cleanup: ; preds = %vector.body
}
define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
-; CHECK-LABEL: sink_splat_fadd_scalable:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a2, a1, 2
-; CHECK-NEXT: li a3, 1024
-; CHECK-NEXT: bgeu a3, a2, .LBB4_2
-; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: j .LBB4_5
-; CHECK-NEXT: .LBB4_2: # %vector.ph
-; CHECK-NEXT: addi a3, a2, -1
-; CHECK-NEXT: andi a4, a3, 1024
-; CHECK-NEXT: xori a3, a4, 1024
-; CHECK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.v.f v8, fa0
-; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: mv a6, a3
-; CHECK-NEXT: .LBB4_3: # %vector.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vl1re32.v v9, (a5)
-; CHECK-NEXT: vfadd.vv v9, v9, v8
-; CHECK-NEXT: vs1r.v v9, (a5)
-; CHECK-NEXT: sub a6, a6, a2
-; CHECK-NEXT: add a5, a5, a1
-; CHECK-NEXT: bnez a6, .LBB4_3
-; CHECK-NEXT: # %bb.4: # %middle.block
-; CHECK-NEXT: beqz a4, .LBB4_7
-; CHECK-NEXT: .LBB4_5: # %for.body.preheader
-; CHECK-NEXT: slli a1, a3, 2
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: lui a2, 1
-; CHECK-NEXT: add a0, a0, a2
-; CHECK-NEXT: .LBB4_6: # %for.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: flw fa5, 0(a1)
-; CHECK-NEXT: fadd.s fa5, fa5, fa0
-; CHECK-NEXT: fsw fa5, 0(a1)
-; CHECK-NEXT: addi a1, a1, 4
-; CHECK-NEXT: bne a1, a0, .LBB4_6
-; CHECK-NEXT: .LBB4_7: # %for.cond.cleanup
-; CHECK-NEXT: ret
+; DONT-SINK-LABEL: sink_splat_fadd_scalable:
+; DONT-SINK: # %bb.0: # %entry
+; DONT-SINK-NEXT: csrr a1, vlenb
+; DONT-SINK-NEXT: srli a2, a1, 2
+; DONT-SINK-NEXT: li a3, 1024
+; DONT-SINK-NEXT: bgeu a3, a2, .LBB4_2
+; DONT-SINK-NEXT: # %bb.1:
+; DONT-SINK-NEXT: li a3, 0
+; DONT-SINK-NEXT: j .LBB4_5
+; DONT-SINK-NEXT: .LBB4_2: # %vector.ph
+; DONT-SINK-NEXT: addi a3, a2, -1
+; DONT-SINK-NEXT: andi a4, a3, 1024
+; DONT-SINK-NEXT: xori a3, a4, 1024
+; DONT-SINK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
+; DONT-SINK-NEXT: vfmv.v.f v8, fa0
+; DONT-SINK-NEXT: mv a5, a0
+; DONT-SINK-NEXT: mv a6, a3
+; DONT-SINK-NEXT: .LBB4_3: # %vector.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: vl1re32.v v9, (a5)
+; DONT-SINK-NEXT: vfadd.vv v9, v9, v8
+; DONT-SINK-NEXT: vs1r.v v9, (a5)
+; DONT-SINK-NEXT: sub a6, a6, a2
+; DONT-SINK-NEXT: add a5, a5, a1
+; DONT-SINK-NEXT: bnez a6, .LBB4_3
+; DONT-SINK-NEXT: # %bb.4: # %middle.block
+; DONT-SINK-NEXT: beqz a4, .LBB4_7
+; DONT-SINK-NEXT: .LBB4_5: # %for.body.preheader
+; DONT-SINK-NEXT: slli a1, a3, 2
+; DONT-SINK-NEXT: add a1, a0, a1
+; DONT-SINK-NEXT: lui a2, 1
+; DONT-SINK-NEXT: add a0, a0, a2
+; DONT-SINK-NEXT: .LBB4_6: # %for.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: flw fa5, 0(a1)
+; DONT-SINK-NEXT: fadd.s fa5, fa5, fa0
+; DONT-SINK-NEXT: fsw fa5, 0(a1)
+; DONT-SINK-NEXT: addi a1, a1, 4
+; DONT-SINK-NEXT: bne a1, a0, .LBB4_6
+; DONT-SINK-NEXT: .LBB4_7: # %for.cond.cleanup
+; DONT-SINK-NEXT: ret
+;
+; SINK-LABEL: sink_splat_fadd_scalable:
+; SINK: # %bb.0: # %entry
+; SINK-NEXT: csrr a1, vlenb
+; SINK-NEXT: srli a2, a1, 2
+; SINK-NEXT: li a3, 1024
+; SINK-NEXT: bgeu a3, a2, .LBB4_2
+; SINK-NEXT: # %bb.1:
+; SINK-NEXT: li a3, 0
+; SINK-NEXT: j .LBB4_5
+; SINK-NEXT: .LBB4_2: # %vector.ph
+; SINK-NEXT: addi a3, a2, -1
+; SINK-NEXT: andi a4, a3, 1024
+; SINK-NEXT: xori a3, a4, 1024
+; SINK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
+; SINK-NEXT: mv a5, a0
+; SINK-NEXT: mv a6, a3
+; SINK-NEXT: .LBB4_3: # %vector.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: vl1re32.v v8, (a5)
+; SINK-NEXT: vfadd.vf v8, v8, fa0
+; SINK-NEXT: vs1r.v v8, (a5)
+; SINK-NEXT: sub a6, a6, a2
+; SINK-NEXT: add a5, a5, a1
+; SINK-NEXT: bnez a6, .LBB4_3
+; SINK-NEXT: # %bb.4: # %middle.block
+; SINK-NEXT: beqz a4, .LBB4_7
+; SINK-NEXT: .LBB4_5: # %for.body.preheader
+; SINK-NEXT: slli a1, a3, 2
+; SINK-NEXT: add a1, a0, a1
+; SINK-NEXT: lui a2, 1
+; SINK-NEXT: add a0, a0, a2
+; SINK-NEXT: .LBB4_6: # %for.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: flw fa5, 0(a1)
+; SINK-NEXT: fadd.s fa5, fa5, fa0
+; SINK-NEXT: fsw fa5, 0(a1)
+; SINK-NEXT: addi a1, a1, 4
+; SINK-NEXT: bne a1, a0, .LBB4_6
+; SINK-NEXT: .LBB4_7: # %for.cond.cleanup
+; SINK-NEXT: ret
entry:
%0 = call i64 @llvm.vscale.i64()
%1 = shl i64 %0, 1
@@ -314,23 +448,40 @@ for.body: ; preds = %for.body.preheader,
declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32)
define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: sink_splat_vp_fadd:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vfmv.v.f v8, fa0
-; CHECK-NEXT: lui a2, 1
-; CHECK-NEXT: add a2, a0, a2
-; CHECK-NEXT: .LBB5_1: # %vector.body
-; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT: vfadd.vv v9, v9, v8, v0.t
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vse32.v v9, (a0)
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: bne a0, a2, .LBB5_1
-; CHECK-NEXT: # %bb.2: # %for.cond.cleanup
-; CHECK-NEXT: ret
+; DONT-SINK-LABEL: sink_splat_vp_fadd:
+; DONT-SINK: # %bb.0: # %entry
+; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DONT-SINK-NEXT: vfmv.v.f v8, fa0
+; DONT-SINK-NEXT: lui a2, 1
+; DONT-SINK-NEXT: add a2, a0, a2
+; DONT-SINK-NEXT: .LBB5_1: # %vector.body
+; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; DONT-SINK-NEXT: vle32.v v9, (a0)
+; DONT-SINK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; DONT-SINK-NEXT: vfadd.vv v9, v9, v8, v0.t
+; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DONT-SINK-NEXT: vse32.v v9, (a0)
+; DONT-SINK-NEXT: addi a0, a0, 16
+; DONT-SINK-NEXT: bne a0, a2, .LBB5_1
+; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; DONT-SINK-NEXT: ret
+;
+; SINK-LABEL: sink_splat_vp_fadd:
+; SINK: # %bb.0: # %entry
+; SINK-NEXT: lui a2, 1
+; SINK-NEXT: add a2, a0, a2
+; SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; SINK-NEXT: .LBB5_1: # %vector.body
+; SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; SINK-NEXT: vle32.v v8, (a0)
+; SINK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; SINK-NEXT: vfadd.vf v8, v8, fa0, v0.t
+; SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; SINK-NEXT: vse32.v v8, (a0)
+; SINK-NEXT: addi a0, a0, 16
+; SINK-NEXT: bne a0, a2, .LBB5_1
+; SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; SINK-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
%broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
From 4ce060a81958b82be0ef06648bd09b7dbced56b9 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 25 Jan 2024 09:35:37 -0800
Subject: [PATCH 4/5] !fixup change feature name
---
llvm/lib/Target/RISCV/RISCVFeatures.td | 6 +-
llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 2 +-
.../RISCV/rvv/dont-sink-splat-operands.ll | 156 +++++++++++++++++-
3 files changed, 157 insertions(+), 7 deletions(-)
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 0b10fb9b416e44e..fce20cc9d4329a7 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -1084,9 +1084,9 @@ def NoShortForwardBranchOpt : Predicate<"!Subtarget->hasShortForwardBranchOpt()"
// Some subtargets require a S2V transfer buffer to move scalars into vectors.
// FIXME: Forming .vx/.vf/.wx/.wf can reduce register pressure.
-def TuneDontSinkSplatOperands
- : SubtargetFeature<"dont-sink-splat-operands", "DontSinkSplatOperands",
- "true", "Don't sink splat operands to enable .vx, .vf,"
+def TuneNoSinkSplatOperands
+ : SubtargetFeature<"no-sink-splat-operands", "SinkSplatOperands",
+ "false", "Disable sink splat operands to enable .vx, .vf,"
".wx, and .wf instructions">;
def TuneConditionalCompressedMoveFusion
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 6737f1c16238909..9987640e696d2a9 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -2005,7 +2005,7 @@ bool RISCVTargetLowering::shouldSinkOperands(
// repeatedly.
// FIXME: It could still be worth doing if it would improve vector register
// pressure and prevent a vector spill.
- if (Subtarget.dontSinkSplatOperands())
+ if (!Subtarget.sinkSplatOperands())
return false;
for (auto OpIdx : enumerate(I->operands())) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
index aeba64cade0e18f..023927a5abff497 100644
--- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
@@ -1,12 +1,15 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
-; RUN: -mattr=+dont-sink-splat-operands -riscv-v-vector-bits-min=128 \
+; RUN: -mattr=+no-sink-splat-operands -riscv-v-vector-bits-min=128 \
; RUN: | FileCheck -check-prefix=DONT-SINK %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
-; RUN: -mattr=-dont-sink-splat-operands -riscv-v-vector-bits-min=128 \
+; RUN: -mattr=-no-sink-splat-operands -riscv-v-vector-bits-min=128 \
; RUN: | FileCheck -check-prefix=SINK %s
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
+; RUN: -riscv-v-vector-bits-min=128 \
+; RUN: | FileCheck -check-prefix=DEFAULT %s
-; Test that we don't sink splat operands when compiling with dont-sink-splat-operands.
+; Test that we don't sink splat operands when compiling with no-sink-splat-operands.
; Each scalar register access requires a S2V transfer buffer entry. Using too many
; limits performance.
; FIXME: This is potentially bad for register pressure. Need a better heuristic.
@@ -42,6 +45,21 @@ define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
; SINK-NEXT: bne a0, a2, .LBB0_1
; SINK-NEXT: # %bb.2: # %for.cond.cleanup
; SINK-NEXT: ret
+;
+; DEFAULT-LABEL: sink_splat_add:
+; DEFAULT: # %bb.0: # %entry
+; DEFAULT-NEXT: lui a2, 1
+; DEFAULT-NEXT: add a2, a0, a2
+; DEFAULT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DEFAULT-NEXT: .LBB0_1: # %vector.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: vle32.v v8, (a0)
+; DEFAULT-NEXT: vadd.vx v8, v8, a1
+; DEFAULT-NEXT: vse32.v v8, (a0)
+; DEFAULT-NEXT: addi a0, a0, 16
+; DEFAULT-NEXT: bne a0, a2, .LBB0_1
+; DEFAULT-NEXT: # %bb.2: # %for.cond.cleanup
+; DEFAULT-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
@@ -150,6 +168,48 @@ define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
; SINK-NEXT: bne a2, a0, .LBB1_6
; SINK-NEXT: .LBB1_7: # %for.cond.cleanup
; SINK-NEXT: ret
+;
+; DEFAULT-LABEL: sink_splat_add_scalable:
+; DEFAULT: # %bb.0: # %entry
+; DEFAULT-NEXT: csrr a5, vlenb
+; DEFAULT-NEXT: srli a2, a5, 1
+; DEFAULT-NEXT: li a3, 1024
+; DEFAULT-NEXT: bgeu a3, a2, .LBB1_2
+; DEFAULT-NEXT: # %bb.1:
+; DEFAULT-NEXT: li a3, 0
+; DEFAULT-NEXT: j .LBB1_5
+; DEFAULT-NEXT: .LBB1_2: # %vector.ph
+; DEFAULT-NEXT: addi a3, a2, -1
+; DEFAULT-NEXT: andi a4, a3, 1024
+; DEFAULT-NEXT: xori a3, a4, 1024
+; DEFAULT-NEXT: slli a5, a5, 1
+; DEFAULT-NEXT: vsetvli a6, zero, e32, m2, ta, ma
+; DEFAULT-NEXT: mv a6, a0
+; DEFAULT-NEXT: mv a7, a3
+; DEFAULT-NEXT: .LBB1_3: # %vector.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: vl2re32.v v8, (a6)
+; DEFAULT-NEXT: vadd.vx v8, v8, a1
+; DEFAULT-NEXT: vs2r.v v8, (a6)
+; DEFAULT-NEXT: sub a7, a7, a2
+; DEFAULT-NEXT: add a6, a6, a5
+; DEFAULT-NEXT: bnez a7, .LBB1_3
+; DEFAULT-NEXT: # %bb.4: # %middle.block
+; DEFAULT-NEXT: beqz a4, .LBB1_7
+; DEFAULT-NEXT: .LBB1_5: # %for.body.preheader
+; DEFAULT-NEXT: slli a2, a3, 2
+; DEFAULT-NEXT: add a2, a0, a2
+; DEFAULT-NEXT: lui a3, 1
+; DEFAULT-NEXT: add a0, a0, a3
+; DEFAULT-NEXT: .LBB1_6: # %for.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: lw a3, 0(a2)
+; DEFAULT-NEXT: add a3, a3, a1
+; DEFAULT-NEXT: sw a3, 0(a2)
+; DEFAULT-NEXT: addi a2, a2, 4
+; DEFAULT-NEXT: bne a2, a0, .LBB1_6
+; DEFAULT-NEXT: .LBB1_7: # %for.cond.cleanup
+; DEFAULT-NEXT: ret
entry:
%0 = call i64 @llvm.vscale.i64()
%1 = shl i64 %0, 2
@@ -238,6 +298,23 @@ define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i
; SINK-NEXT: bne a0, a3, .LBB2_1
; SINK-NEXT: # %bb.2: # %for.cond.cleanup
; SINK-NEXT: ret
+;
+; DEFAULT-LABEL: sink_splat_vp_add:
+; DEFAULT: # %bb.0: # %entry
+; DEFAULT-NEXT: lui a3, 1
+; DEFAULT-NEXT: add a3, a0, a3
+; DEFAULT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DEFAULT-NEXT: .LBB2_1: # %vector.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: vle32.v v8, (a0)
+; DEFAULT-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; DEFAULT-NEXT: vadd.vx v8, v8, a1, v0.t
+; DEFAULT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DEFAULT-NEXT: vse32.v v8, (a0)
+; DEFAULT-NEXT: addi a0, a0, 16
+; DEFAULT-NEXT: bne a0, a3, .LBB2_1
+; DEFAULT-NEXT: # %bb.2: # %for.cond.cleanup
+; DEFAULT-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x i32> poison, i32 %x, i32 0
%broadcast.splat = shufflevector <4 x i32> %broadcast.splatinsert, <4 x i32> poison, <4 x i32> zeroinitializer
@@ -290,6 +367,21 @@ define void @sink_splat_fadd(float* nocapture %a, float %x) {
; SINK-NEXT: bne a0, a1, .LBB3_1
; SINK-NEXT: # %bb.2: # %for.cond.cleanup
; SINK-NEXT: ret
+;
+; DEFAULT-LABEL: sink_splat_fadd:
+; DEFAULT: # %bb.0: # %entry
+; DEFAULT-NEXT: lui a1, 1
+; DEFAULT-NEXT: add a1, a0, a1
+; DEFAULT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DEFAULT-NEXT: .LBB3_1: # %vector.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: vle32.v v8, (a0)
+; DEFAULT-NEXT: vfadd.vf v8, v8, fa0
+; DEFAULT-NEXT: vse32.v v8, (a0)
+; DEFAULT-NEXT: addi a0, a0, 16
+; DEFAULT-NEXT: bne a0, a1, .LBB3_1
+; DEFAULT-NEXT: # %bb.2: # %for.cond.cleanup
+; DEFAULT-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
%broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
@@ -394,6 +486,47 @@ define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
; SINK-NEXT: bne a1, a0, .LBB4_6
; SINK-NEXT: .LBB4_7: # %for.cond.cleanup
; SINK-NEXT: ret
+;
+; DEFAULT-LABEL: sink_splat_fadd_scalable:
+; DEFAULT: # %bb.0: # %entry
+; DEFAULT-NEXT: csrr a1, vlenb
+; DEFAULT-NEXT: srli a2, a1, 2
+; DEFAULT-NEXT: li a3, 1024
+; DEFAULT-NEXT: bgeu a3, a2, .LBB4_2
+; DEFAULT-NEXT: # %bb.1:
+; DEFAULT-NEXT: li a3, 0
+; DEFAULT-NEXT: j .LBB4_5
+; DEFAULT-NEXT: .LBB4_2: # %vector.ph
+; DEFAULT-NEXT: addi a3, a2, -1
+; DEFAULT-NEXT: andi a4, a3, 1024
+; DEFAULT-NEXT: xori a3, a4, 1024
+; DEFAULT-NEXT: vsetvli a5, zero, e32, m1, ta, ma
+; DEFAULT-NEXT: mv a5, a0
+; DEFAULT-NEXT: mv a6, a3
+; DEFAULT-NEXT: .LBB4_3: # %vector.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: vl1re32.v v8, (a5)
+; DEFAULT-NEXT: vfadd.vf v8, v8, fa0
+; DEFAULT-NEXT: vs1r.v v8, (a5)
+; DEFAULT-NEXT: sub a6, a6, a2
+; DEFAULT-NEXT: add a5, a5, a1
+; DEFAULT-NEXT: bnez a6, .LBB4_3
+; DEFAULT-NEXT: # %bb.4: # %middle.block
+; DEFAULT-NEXT: beqz a4, .LBB4_7
+; DEFAULT-NEXT: .LBB4_5: # %for.body.preheader
+; DEFAULT-NEXT: slli a1, a3, 2
+; DEFAULT-NEXT: add a1, a0, a1
+; DEFAULT-NEXT: lui a2, 1
+; DEFAULT-NEXT: add a0, a0, a2
+; DEFAULT-NEXT: .LBB4_6: # %for.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: flw fa5, 0(a1)
+; DEFAULT-NEXT: fadd.s fa5, fa5, fa0
+; DEFAULT-NEXT: fsw fa5, 0(a1)
+; DEFAULT-NEXT: addi a1, a1, 4
+; DEFAULT-NEXT: bne a1, a0, .LBB4_6
+; DEFAULT-NEXT: .LBB4_7: # %for.cond.cleanup
+; DEFAULT-NEXT: ret
entry:
%0 = call i64 @llvm.vscale.i64()
%1 = shl i64 %0, 1
@@ -482,6 +615,23 @@ define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32
; SINK-NEXT: bne a0, a2, .LBB5_1
; SINK-NEXT: # %bb.2: # %for.cond.cleanup
; SINK-NEXT: ret
+;
+; DEFAULT-LABEL: sink_splat_vp_fadd:
+; DEFAULT: # %bb.0: # %entry
+; DEFAULT-NEXT: lui a2, 1
+; DEFAULT-NEXT: add a2, a0, a2
+; DEFAULT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DEFAULT-NEXT: .LBB5_1: # %vector.body
+; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
+; DEFAULT-NEXT: vle32.v v8, (a0)
+; DEFAULT-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; DEFAULT-NEXT: vfadd.vf v8, v8, fa0, v0.t
+; DEFAULT-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; DEFAULT-NEXT: vse32.v v8, (a0)
+; DEFAULT-NEXT: addi a0, a0, 16
+; DEFAULT-NEXT: bne a0, a2, .LBB5_1
+; DEFAULT-NEXT: # %bb.2: # %for.cond.cleanup
+; DEFAULT-NEXT: ret
entry:
%broadcast.splatinsert = insertelement <4 x float> poison, float %x, i32 0
%broadcast.splat = shufflevector <4 x float> %broadcast.splatinsert, <4 x float> poison, <4 x i32> zeroinitializer
From 6fac8a367b0c2fa9f3279986defb2fa74620c189 Mon Sep 17 00:00:00 2001
From: Michael Maitland <michaeltmaitland at gmail.com>
Date: Thu, 25 Jan 2024 09:43:03 -0800
Subject: [PATCH 5/5] !fixup DONT->NO
---
.../RISCV/rvv/dont-sink-splat-operands.ll | 296 +++++++++---------
1 file changed, 148 insertions(+), 148 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
index 023927a5abff497..d27bc236fb4d8d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
; RUN: -mattr=+no-sink-splat-operands -riscv-v-vector-bits-min=128 \
-; RUN: | FileCheck -check-prefix=DONT-SINK %s
+; RUN: | FileCheck -check-prefix=NO-SINK %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v,+f -target-abi=lp64f \
; RUN: -mattr=-no-sink-splat-operands -riscv-v-vector-bits-min=128 \
; RUN: | FileCheck -check-prefix=SINK %s
@@ -15,21 +15,21 @@
; FIXME: This is potentially bad for register pressure. Need a better heuristic.
define void @sink_splat_add(i32* nocapture %a, i32 signext %x) {
-; DONT-SINK-LABEL: sink_splat_add:
-; DONT-SINK: # %bb.0: # %entry
-; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DONT-SINK-NEXT: vmv.v.x v8, a1
-; DONT-SINK-NEXT: lui a1, 1
-; DONT-SINK-NEXT: add a1, a0, a1
-; DONT-SINK-NEXT: .LBB0_1: # %vector.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: vle32.v v9, (a0)
-; DONT-SINK-NEXT: vadd.vv v9, v9, v8
-; DONT-SINK-NEXT: vse32.v v9, (a0)
-; DONT-SINK-NEXT: addi a0, a0, 16
-; DONT-SINK-NEXT: bne a0, a1, .LBB0_1
-; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
-; DONT-SINK-NEXT: ret
+; NO-SINK-LABEL: sink_splat_add:
+; NO-SINK: # %bb.0: # %entry
+; NO-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; NO-SINK-NEXT: vmv.v.x v8, a1
+; NO-SINK-NEXT: lui a1, 1
+; NO-SINK-NEXT: add a1, a0, a1
+; NO-SINK-NEXT: .LBB0_1: # %vector.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: vle32.v v9, (a0)
+; NO-SINK-NEXT: vadd.vv v9, v9, v8
+; NO-SINK-NEXT: vse32.v v9, (a0)
+; NO-SINK-NEXT: addi a0, a0, 16
+; NO-SINK-NEXT: bne a0, a1, .LBB0_1
+; NO-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; NO-SINK-NEXT: ret
;
; SINK-LABEL: sink_splat_add:
; SINK: # %bb.0: # %entry
@@ -84,48 +84,48 @@ for.cond.cleanup: ; preds = %vector.body
declare i64 @llvm.vscale.i64()
define void @sink_splat_add_scalable(i32* nocapture %a, i32 signext %x) {
-; DONT-SINK-LABEL: sink_splat_add_scalable:
-; DONT-SINK: # %bb.0: # %entry
-; DONT-SINK-NEXT: csrr a5, vlenb
-; DONT-SINK-NEXT: srli a2, a5, 1
-; DONT-SINK-NEXT: li a3, 1024
-; DONT-SINK-NEXT: bgeu a3, a2, .LBB1_2
-; DONT-SINK-NEXT: # %bb.1:
-; DONT-SINK-NEXT: li a3, 0
-; DONT-SINK-NEXT: j .LBB1_5
-; DONT-SINK-NEXT: .LBB1_2: # %vector.ph
-; DONT-SINK-NEXT: addi a3, a2, -1
-; DONT-SINK-NEXT: andi a4, a3, 1024
-; DONT-SINK-NEXT: xori a3, a4, 1024
-; DONT-SINK-NEXT: vsetvli a6, zero, e32, m2, ta, ma
-; DONT-SINK-NEXT: vmv.v.x v8, a1
-; DONT-SINK-NEXT: slli a5, a5, 1
-; DONT-SINK-NEXT: mv a6, a0
-; DONT-SINK-NEXT: mv a7, a3
-; DONT-SINK-NEXT: .LBB1_3: # %vector.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: vl2re32.v v10, (a6)
-; DONT-SINK-NEXT: vadd.vv v10, v10, v8
-; DONT-SINK-NEXT: vs2r.v v10, (a6)
-; DONT-SINK-NEXT: sub a7, a7, a2
-; DONT-SINK-NEXT: add a6, a6, a5
-; DONT-SINK-NEXT: bnez a7, .LBB1_3
-; DONT-SINK-NEXT: # %bb.4: # %middle.block
-; DONT-SINK-NEXT: beqz a4, .LBB1_7
-; DONT-SINK-NEXT: .LBB1_5: # %for.body.preheader
-; DONT-SINK-NEXT: slli a2, a3, 2
-; DONT-SINK-NEXT: add a2, a0, a2
-; DONT-SINK-NEXT: lui a3, 1
-; DONT-SINK-NEXT: add a0, a0, a3
-; DONT-SINK-NEXT: .LBB1_6: # %for.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: lw a3, 0(a2)
-; DONT-SINK-NEXT: add a3, a3, a1
-; DONT-SINK-NEXT: sw a3, 0(a2)
-; DONT-SINK-NEXT: addi a2, a2, 4
-; DONT-SINK-NEXT: bne a2, a0, .LBB1_6
-; DONT-SINK-NEXT: .LBB1_7: # %for.cond.cleanup
-; DONT-SINK-NEXT: ret
+; NO-SINK-LABEL: sink_splat_add_scalable:
+; NO-SINK: # %bb.0: # %entry
+; NO-SINK-NEXT: csrr a5, vlenb
+; NO-SINK-NEXT: srli a2, a5, 1
+; NO-SINK-NEXT: li a3, 1024
+; NO-SINK-NEXT: bgeu a3, a2, .LBB1_2
+; NO-SINK-NEXT: # %bb.1:
+; NO-SINK-NEXT: li a3, 0
+; NO-SINK-NEXT: j .LBB1_5
+; NO-SINK-NEXT: .LBB1_2: # %vector.ph
+; NO-SINK-NEXT: addi a3, a2, -1
+; NO-SINK-NEXT: andi a4, a3, 1024
+; NO-SINK-NEXT: xori a3, a4, 1024
+; NO-SINK-NEXT: vsetvli a6, zero, e32, m2, ta, ma
+; NO-SINK-NEXT: vmv.v.x v8, a1
+; NO-SINK-NEXT: slli a5, a5, 1
+; NO-SINK-NEXT: mv a6, a0
+; NO-SINK-NEXT: mv a7, a3
+; NO-SINK-NEXT: .LBB1_3: # %vector.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: vl2re32.v v10, (a6)
+; NO-SINK-NEXT: vadd.vv v10, v10, v8
+; NO-SINK-NEXT: vs2r.v v10, (a6)
+; NO-SINK-NEXT: sub a7, a7, a2
+; NO-SINK-NEXT: add a6, a6, a5
+; NO-SINK-NEXT: bnez a7, .LBB1_3
+; NO-SINK-NEXT: # %bb.4: # %middle.block
+; NO-SINK-NEXT: beqz a4, .LBB1_7
+; NO-SINK-NEXT: .LBB1_5: # %for.body.preheader
+; NO-SINK-NEXT: slli a2, a3, 2
+; NO-SINK-NEXT: add a2, a0, a2
+; NO-SINK-NEXT: lui a3, 1
+; NO-SINK-NEXT: add a0, a0, a3
+; NO-SINK-NEXT: .LBB1_6: # %for.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: lw a3, 0(a2)
+; NO-SINK-NEXT: add a3, a3, a1
+; NO-SINK-NEXT: sw a3, 0(a2)
+; NO-SINK-NEXT: addi a2, a2, 4
+; NO-SINK-NEXT: bne a2, a0, .LBB1_6
+; NO-SINK-NEXT: .LBB1_7: # %for.cond.cleanup
+; NO-SINK-NEXT: ret
;
; SINK-LABEL: sink_splat_add_scalable:
; SINK: # %bb.0: # %entry
@@ -264,23 +264,23 @@ for.body: ; preds = %for.body.preheader,
declare <4 x i32> @llvm.vp.add.v4i32(<4 x i32>, <4 x i32>, <4 x i1>, i32)
define void @sink_splat_vp_add(i32* nocapture %a, i32 signext %x, <4 x i1> %m, i32 zeroext %vl) {
-; DONT-SINK-LABEL: sink_splat_vp_add:
-; DONT-SINK: # %bb.0: # %entry
-; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DONT-SINK-NEXT: vmv.v.x v8, a1
-; DONT-SINK-NEXT: lui a1, 1
-; DONT-SINK-NEXT: add a1, a0, a1
-; DONT-SINK-NEXT: .LBB2_1: # %vector.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: vle32.v v9, (a0)
-; DONT-SINK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
-; DONT-SINK-NEXT: vadd.vv v9, v9, v8, v0.t
-; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DONT-SINK-NEXT: vse32.v v9, (a0)
-; DONT-SINK-NEXT: addi a0, a0, 16
-; DONT-SINK-NEXT: bne a0, a1, .LBB2_1
-; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
-; DONT-SINK-NEXT: ret
+; NO-SINK-LABEL: sink_splat_vp_add:
+; NO-SINK: # %bb.0: # %entry
+; NO-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; NO-SINK-NEXT: vmv.v.x v8, a1
+; NO-SINK-NEXT: lui a1, 1
+; NO-SINK-NEXT: add a1, a0, a1
+; NO-SINK-NEXT: .LBB2_1: # %vector.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: vle32.v v9, (a0)
+; NO-SINK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
+; NO-SINK-NEXT: vadd.vv v9, v9, v8, v0.t
+; NO-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; NO-SINK-NEXT: vse32.v v9, (a0)
+; NO-SINK-NEXT: addi a0, a0, 16
+; NO-SINK-NEXT: bne a0, a1, .LBB2_1
+; NO-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; NO-SINK-NEXT: ret
;
; SINK-LABEL: sink_splat_vp_add:
; SINK: # %bb.0: # %entry
@@ -337,21 +337,21 @@ for.cond.cleanup: ; preds = %vector.body
}
define void @sink_splat_fadd(float* nocapture %a, float %x) {
-; DONT-SINK-LABEL: sink_splat_fadd:
-; DONT-SINK: # %bb.0: # %entry
-; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DONT-SINK-NEXT: vfmv.v.f v8, fa0
-; DONT-SINK-NEXT: lui a1, 1
-; DONT-SINK-NEXT: add a1, a0, a1
-; DONT-SINK-NEXT: .LBB3_1: # %vector.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: vle32.v v9, (a0)
-; DONT-SINK-NEXT: vfadd.vv v9, v9, v8
-; DONT-SINK-NEXT: vse32.v v9, (a0)
-; DONT-SINK-NEXT: addi a0, a0, 16
-; DONT-SINK-NEXT: bne a0, a1, .LBB3_1
-; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
-; DONT-SINK-NEXT: ret
+; NO-SINK-LABEL: sink_splat_fadd:
+; NO-SINK: # %bb.0: # %entry
+; NO-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; NO-SINK-NEXT: vfmv.v.f v8, fa0
+; NO-SINK-NEXT: lui a1, 1
+; NO-SINK-NEXT: add a1, a0, a1
+; NO-SINK-NEXT: .LBB3_1: # %vector.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: vle32.v v9, (a0)
+; NO-SINK-NEXT: vfadd.vv v9, v9, v8
+; NO-SINK-NEXT: vse32.v v9, (a0)
+; NO-SINK-NEXT: addi a0, a0, 16
+; NO-SINK-NEXT: bne a0, a1, .LBB3_1
+; NO-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; NO-SINK-NEXT: ret
;
; SINK-LABEL: sink_splat_fadd:
; SINK: # %bb.0: # %entry
@@ -404,47 +404,47 @@ for.cond.cleanup: ; preds = %vector.body
}
define void @sink_splat_fadd_scalable(float* nocapture %a, float %x) {
-; DONT-SINK-LABEL: sink_splat_fadd_scalable:
-; DONT-SINK: # %bb.0: # %entry
-; DONT-SINK-NEXT: csrr a1, vlenb
-; DONT-SINK-NEXT: srli a2, a1, 2
-; DONT-SINK-NEXT: li a3, 1024
-; DONT-SINK-NEXT: bgeu a3, a2, .LBB4_2
-; DONT-SINK-NEXT: # %bb.1:
-; DONT-SINK-NEXT: li a3, 0
-; DONT-SINK-NEXT: j .LBB4_5
-; DONT-SINK-NEXT: .LBB4_2: # %vector.ph
-; DONT-SINK-NEXT: addi a3, a2, -1
-; DONT-SINK-NEXT: andi a4, a3, 1024
-; DONT-SINK-NEXT: xori a3, a4, 1024
-; DONT-SINK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
-; DONT-SINK-NEXT: vfmv.v.f v8, fa0
-; DONT-SINK-NEXT: mv a5, a0
-; DONT-SINK-NEXT: mv a6, a3
-; DONT-SINK-NEXT: .LBB4_3: # %vector.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: vl1re32.v v9, (a5)
-; DONT-SINK-NEXT: vfadd.vv v9, v9, v8
-; DONT-SINK-NEXT: vs1r.v v9, (a5)
-; DONT-SINK-NEXT: sub a6, a6, a2
-; DONT-SINK-NEXT: add a5, a5, a1
-; DONT-SINK-NEXT: bnez a6, .LBB4_3
-; DONT-SINK-NEXT: # %bb.4: # %middle.block
-; DONT-SINK-NEXT: beqz a4, .LBB4_7
-; DONT-SINK-NEXT: .LBB4_5: # %for.body.preheader
-; DONT-SINK-NEXT: slli a1, a3, 2
-; DONT-SINK-NEXT: add a1, a0, a1
-; DONT-SINK-NEXT: lui a2, 1
-; DONT-SINK-NEXT: add a0, a0, a2
-; DONT-SINK-NEXT: .LBB4_6: # %for.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: flw fa5, 0(a1)
-; DONT-SINK-NEXT: fadd.s fa5, fa5, fa0
-; DONT-SINK-NEXT: fsw fa5, 0(a1)
-; DONT-SINK-NEXT: addi a1, a1, 4
-; DONT-SINK-NEXT: bne a1, a0, .LBB4_6
-; DONT-SINK-NEXT: .LBB4_7: # %for.cond.cleanup
-; DONT-SINK-NEXT: ret
+; NO-SINK-LABEL: sink_splat_fadd_scalable:
+; NO-SINK: # %bb.0: # %entry
+; NO-SINK-NEXT: csrr a1, vlenb
+; NO-SINK-NEXT: srli a2, a1, 2
+; NO-SINK-NEXT: li a3, 1024
+; NO-SINK-NEXT: bgeu a3, a2, .LBB4_2
+; NO-SINK-NEXT: # %bb.1:
+; NO-SINK-NEXT: li a3, 0
+; NO-SINK-NEXT: j .LBB4_5
+; NO-SINK-NEXT: .LBB4_2: # %vector.ph
+; NO-SINK-NEXT: addi a3, a2, -1
+; NO-SINK-NEXT: andi a4, a3, 1024
+; NO-SINK-NEXT: xori a3, a4, 1024
+; NO-SINK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
+; NO-SINK-NEXT: vfmv.v.f v8, fa0
+; NO-SINK-NEXT: mv a5, a0
+; NO-SINK-NEXT: mv a6, a3
+; NO-SINK-NEXT: .LBB4_3: # %vector.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: vl1re32.v v9, (a5)
+; NO-SINK-NEXT: vfadd.vv v9, v9, v8
+; NO-SINK-NEXT: vs1r.v v9, (a5)
+; NO-SINK-NEXT: sub a6, a6, a2
+; NO-SINK-NEXT: add a5, a5, a1
+; NO-SINK-NEXT: bnez a6, .LBB4_3
+; NO-SINK-NEXT: # %bb.4: # %middle.block
+; NO-SINK-NEXT: beqz a4, .LBB4_7
+; NO-SINK-NEXT: .LBB4_5: # %for.body.preheader
+; NO-SINK-NEXT: slli a1, a3, 2
+; NO-SINK-NEXT: add a1, a0, a1
+; NO-SINK-NEXT: lui a2, 1
+; NO-SINK-NEXT: add a0, a0, a2
+; NO-SINK-NEXT: .LBB4_6: # %for.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: flw fa5, 0(a1)
+; NO-SINK-NEXT: fadd.s fa5, fa5, fa0
+; NO-SINK-NEXT: fsw fa5, 0(a1)
+; NO-SINK-NEXT: addi a1, a1, 4
+; NO-SINK-NEXT: bne a1, a0, .LBB4_6
+; NO-SINK-NEXT: .LBB4_7: # %for.cond.cleanup
+; NO-SINK-NEXT: ret
;
; SINK-LABEL: sink_splat_fadd_scalable:
; SINK: # %bb.0: # %entry
@@ -581,23 +581,23 @@ for.body: ; preds = %for.body.preheader,
declare <4 x float> @llvm.vp.fadd.v4i32(<4 x float>, <4 x float>, <4 x i1>, i32)
define void @sink_splat_vp_fadd(float* nocapture %a, float %x, <4 x i1> %m, i32 zeroext %vl) {
-; DONT-SINK-LABEL: sink_splat_vp_fadd:
-; DONT-SINK: # %bb.0: # %entry
-; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DONT-SINK-NEXT: vfmv.v.f v8, fa0
-; DONT-SINK-NEXT: lui a2, 1
-; DONT-SINK-NEXT: add a2, a0, a2
-; DONT-SINK-NEXT: .LBB5_1: # %vector.body
-; DONT-SINK-NEXT: # =>This Inner Loop Header: Depth=1
-; DONT-SINK-NEXT: vle32.v v9, (a0)
-; DONT-SINK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
-; DONT-SINK-NEXT: vfadd.vv v9, v9, v8, v0.t
-; DONT-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; DONT-SINK-NEXT: vse32.v v9, (a0)
-; DONT-SINK-NEXT: addi a0, a0, 16
-; DONT-SINK-NEXT: bne a0, a2, .LBB5_1
-; DONT-SINK-NEXT: # %bb.2: # %for.cond.cleanup
-; DONT-SINK-NEXT: ret
+; NO-SINK-LABEL: sink_splat_vp_fadd:
+; NO-SINK: # %bb.0: # %entry
+; NO-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; NO-SINK-NEXT: vfmv.v.f v8, fa0
+; NO-SINK-NEXT: lui a2, 1
+; NO-SINK-NEXT: add a2, a0, a2
+; NO-SINK-NEXT: .LBB5_1: # %vector.body
+; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
+; NO-SINK-NEXT: vle32.v v9, (a0)
+; NO-SINK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; NO-SINK-NEXT: vfadd.vv v9, v9, v8, v0.t
+; NO-SINK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; NO-SINK-NEXT: vse32.v v9, (a0)
+; NO-SINK-NEXT: addi a0, a0, 16
+; NO-SINK-NEXT: bne a0, a2, .LBB5_1
+; NO-SINK-NEXT: # %bb.2: # %for.cond.cleanup
+; NO-SINK-NEXT: ret
;
; SINK-LABEL: sink_splat_vp_fadd:
; SINK: # %bb.0: # %entry