[llvm] 93b14c3 - [RISCV] Add some vsetvli insertion test cases with vmv.s.x+reduction. NFC (#75544)
via llvm-commits
llvm-commits at lists.llvm.org
Fri Dec 15 08:50:58 PST 2023
Author: Craig Topper
Date: 2023-12-15T08:50:54-08:00
New Revision: 93b14c3df17500e675f31674165b5378dd0b4eaf
URL: https://github.com/llvm/llvm-project/commit/93b14c3df17500e675f31674165b5378dd0b4eaf
DIFF: https://github.com/llvm/llvm-project/commit/93b14c3df17500e675f31674165b5378dd0b4eaf.diff
LOG: [RISCV] Add some vsetvli insertion test cases with vmv.s.x+reduction. NFC (#75544)
These test cases were intended to get a single vsetvli by using the
vmv.s.x intrinsic with the same LMUL as the reduction. This works for
FP, but does not work for integer.
I believe #71501 will break this for FP too. Hopefully the vsetvli pass
can be taught to fix this.
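For reference, the CHECK lines below show the current difference: the FP case
gets one vsetvli at e32/m4 covering both vfmv.s.f and vfredusum.vs, while the
integer case ends up with a vsetvli at m1 for the vmv.s.x and a second one at
m4 for the vredsum.vs. If the vsetvli pass is taught that the scalar move only
writes element 0, the integer case could match the FP output; a hypothetical
sketch of that hoped-for sequence (not output produced by this commit) would be:

    # hypothetical single-vsetvli lowering for int_reduction_vmv_s_x
    vsetvli zero, a1, e32, m4, ta, ma
    vmv.s.x v12, a0
    vredsum.vs v8, v8, v12
    ret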
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index a79180ed45b42b..5dead46581a0a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -627,6 +627,41 @@ define void @add_v16i64(ptr %x, ptr %y) vscale_range(2,2) {
ret void
}
+define <vscale x 2 x float> @fp_reduction_vfmv_s_f(float %0, <vscale x 8 x float> %1, i64 %2) {
+; CHECK-LABEL: fp_reduction_vfmv_s_f:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT: vfmv.s.f v12, fa0
+; CHECK-NEXT: vfredusum.vs v8, v8, v12
+; CHECK-NEXT: ret
+ %4 = tail call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> poison, float %0, i64 %2)
+ %5 = tail call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> %4, i64 0)
+ %6 = tail call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> %1, <vscale x 2 x float> %5, i64 7, i64 %2)
+ ret <vscale x 2 x float> %6
+}
+
+define dso_local <vscale x 2 x i32> @int_reduction_vmv_s_x(i32 signext %0, <vscale x 8 x i32> %1, i64 %2) {
+; CHECK-LABEL: int_reduction_vmv_s_x:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v12, a0
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT: vredsum.vs v8, v8, v12
+; CHECK-NEXT: ret
+ %4 = tail call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 %0, i64 %2)
+ %5 = tail call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %4, i64 0)
+ %6 = tail call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> %1, <vscale x 2 x i32> %5, i64 %2)
+ ret <vscale x 2 x i32> %6
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float>, float, i64)
+declare <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float>, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float>, <vscale x 8 x float>, <vscale x 2 x float>, i64, i64)
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32>, i32, i64) #1
+declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32>, i64 immarg) #2
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32>, <vscale x 8 x i32>, <vscale x 2 x i32>, i64) #1
+
declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
<vscale x 1 x i64>,
<vscale x 1 x i64>,