[llvm] [RISCV] Don't increase vslide or splat vl if +vl-dependent-latency is present (PR #147089)
Luke Lau via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 9 01:24:52 PDT 2025
================
@@ -753,3 +753,113 @@ while.body:
while.end:
ret i64 0
}
+
+define <vscale x 1 x i64> @vslideup_vl1(<vscale x 1 x i64> %a) nounwind {
+; NODEPVL-LABEL: vslideup_vl1:
+; NODEPVL: # %bb.0: # %entry
+; NODEPVL-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; NODEPVL-NEXT: vslideup.vi v9, v8, 1
+; NODEPVL-NEXT: vadd.vv v8, v9, v9
+; NODEPVL-NEXT: ret
+;
+; DEPVL-LABEL: vslideup_vl1:
+; DEPVL: # %bb.0: # %entry
+; DEPVL-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; DEPVL-NEXT: vslideup.vi v9, v8, 1
+; DEPVL-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; DEPVL-NEXT: vadd.vv v8, v9, v9
+; DEPVL-NEXT: ret
+entry:
+ %1 = tail call <vscale x 1 x i64> @llvm.riscv.vslideup(
+ <vscale x 1 x i64> poison,
+ <vscale x 1 x i64> %a,
+ i64 1,
+ i64 1,
+ i64 3)
+ %2 = tail call <vscale x 1 x i64> @llvm.riscv.vadd(
+ <vscale x 1 x i64> poison,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %1,
+ i64 2)
+ ret <vscale x 1 x i64> %2
+}
+
+define <vscale x 1 x i64> @vslidedown_vl1(<vscale x 1 x i64> %a) nounwind {
+; NODEPVL-LABEL: vslidedown_vl1:
+; NODEPVL: # %bb.0: # %entry
+; NODEPVL-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; NODEPVL-NEXT: vslidedown.vi v8, v8, 1
+; NODEPVL-NEXT: vadd.vv v8, v8, v8
+; NODEPVL-NEXT: ret
+;
+; DEPVL-LABEL: vslidedown_vl1:
+; DEPVL: # %bb.0: # %entry
+; DEPVL-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; DEPVL-NEXT: vslidedown.vi v8, v8, 1
+; DEPVL-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; DEPVL-NEXT: vadd.vv v8, v8, v8
+; DEPVL-NEXT: ret
+entry:
+ %1 = tail call <vscale x 1 x i64> @llvm.riscv.vslidedown(
+ <vscale x 1 x i64> poison,
+ <vscale x 1 x i64> %a,
+ i64 1,
+ i64 1,
+ i64 3)
+ %2 = tail call <vscale x 1 x i64> @llvm.riscv.vadd(
+ <vscale x 1 x i64> poison,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %1,
+ i64 2)
+ ret <vscale x 1 x i64> %2
+}
+
+define <vscale x 1 x i64> @vmv.v.x_vl1() nounwind {
+; NODEPVL-LABEL: vmv.v.x_vl1:
+; NODEPVL: # %bb.0: # %entry
+; NODEPVL-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; NODEPVL-NEXT: vmv.v.i v8, 1
+; NODEPVL-NEXT: vadd.vv v8, v8, v8
+; NODEPVL-NEXT: ret
+;
+; DEPVL-LABEL: vmv.v.x_vl1:
+; DEPVL: # %bb.0: # %entry
+; DEPVL-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; DEPVL-NEXT: vmv.v.i v8, 1
+; DEPVL-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; DEPVL-NEXT: vadd.vv v8, v8, v8
+; DEPVL-NEXT: ret
+entry:
+ %1 = tail call <vscale x 1 x i64> @llvm.riscv.vmv.v.x(
+ <vscale x 1 x i64> poison,
+ i64 1,
+ i64 1)
+ %2 = tail call <vscale x 1 x i64> @llvm.riscv.vadd(
+ <vscale x 1 x i64> poison,
+ <vscale x 1 x i64> %1,
+ <vscale x 1 x i64> %1,
+ i64 2)
+ ret <vscale x 1 x i64> %2
+}
+
+define <vscale x 1 x double> @vfmv.v.f_vl1(double %f) nounwind {
+; CHECK-LABEL: vfmv.v.f_vl1:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vfmv.s.f v8, fa0
----------------
lukel97 wrote:
There's an isel pattern that selects vfmv.s.f directly here, so it doesn't go through this codepath.
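
For illustration only (this sketch is not part of the patch; the unsuffixed intrinsic name follows the style of the tests above): a plain vl=1 splat like the one below is matched straight to vfmv.s.f during instruction selection, which is why it never reaches the splat-vl codepath this patch guards.

  define <vscale x 1 x double> @splat_f_vl1(double %f) nounwind {
    ; Assumed behavior per the comment above: with vl=1 this splat is
    ; selected directly as vfmv.s.f rather than vfmv.v.f, so the
    ; vl-increasing logic changed in this patch is never consulted.
    %1 = tail call <vscale x 1 x double> @llvm.riscv.vfmv.v.f(
        <vscale x 1 x double> poison,
        double %f,
        i64 1)
    ret <vscale x 1 x double> %1
  }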
https://github.com/llvm/llvm-project/pull/147089