[llvm] 53edba8 - [RISCV] Add vp.reverse tests for Zvfh and fractional lmuls. NFC
Craig Topper via llvm-commits
llvm-commits@lists.llvm.org
Mon Jun 23 10:24:12 PDT 2025
Author: Craig Topper
Date: 2025-06-23T10:23:51-07:00
New Revision: 53edba8091dfd24e2b44204ae8263b009d95414f
URL: https://github.com/llvm/llvm-project/commit/53edba8091dfd24e2b44204ae8263b009d95414f
DIFF: https://github.com/llvm/llvm-project/commit/53edba8091dfd24e2b44204ae8263b009d95414f.diff
LOG: [RISCV] Add vp.reverse tests for Zvfh and fractional lmuls. NFC
Added:
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverser-float.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverser-int.ll
Modified:
llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll
llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
Removed:
llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll
llvm/test/CodeGen/RISCV/rvv/vp-reverse-int-fixed-vectors.ll
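For context, the new coverage exercises the llvm.experimental.vp.reverse intrinsic on Zvfh half-precision element types at fractional LMULs (the mf4/mf2 register groups visible in the vsetvli check lines below). A minimal sketch of the kind of test function these files contain is shown next, reusing the RUN line from the updated float test; the function name is illustrative only, and the CHECK lines in the real tests are regenerated with utils/update_llc_test_checks.py rather than written by hand.

; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v,+zvfh -verify-machineinstrs < %s | FileCheck %s

define <vscale x 1 x half> @example_vp_reverse_nxv1f16(<vscale x 1 x half> %src, i32 zeroext %evl) {
  ; Reverse the first %evl elements of %src using an all-ones mask.
  %dst = call <vscale x 1 x half> @llvm.experimental.vp.reverse.nxv1f16(<vscale x 1 x half> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
  ret <vscale x 1 x half> %dst
}

declare <vscale x 1 x half> @llvm.experimental.vp.reverse.nxv1f16(<vscale x 1 x half>, <vscale x 1 x i1>, i32)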
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverser-float.ll
similarity index 65%
rename from llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll
rename to llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverser-float.ll
index 136f6e7bc9990..1d21cb5586984 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverser-float.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v -verify-machineinstrs -riscv-v-vector-bits-min=128 \
+; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v,+zvfh -verify-machineinstrs -riscv-v-vector-bits-min=128 \
; RUN: < %s | FileCheck %s
define <2 x double> @test_vp_reverse_v2f64_masked(<2 x double> %src, <2 x i1> %mask, i32 zeroext %evl) {
@@ -60,5 +60,31 @@ define <4 x float> @test_vp_reverse_v4f32(<4 x float> %src, i32 zeroext %evl) {
ret <4 x float> %dst
}
-declare <2 x double> @llvm.experimental.vp.reverse.v2f64(<2 x double>,<2 x i1>,i32)
-declare <4 x float> @llvm.experimental.vp.reverse.v4f32(<4 x float>,<4 x i1>,i32)
+define <4 x half> @test_vp_reverse_v4f16_masked(<4 x half> %src, <4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_v4f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <4 x half> @llvm.experimental.vp.reverse.v4f16(<4 x half> %src, <4 x i1> %mask, i32 %evl)
+ ret <4 x half> %dst
+}
+
+define <4 x half> @test_vp_reverse_v4f16(<4 x half> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_v4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <4 x half> @llvm.experimental.vp.reverse.v4f16(<4 x half> %src, <4 x i1> splat (i1 1), i32 %evl)
+ ret <4 x half> %dst
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverser-int.ll
similarity index 100%
rename from llvm/test/CodeGen/RISCV/rvv/vp-reverse-int-fixed-vectors.ll
rename to llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vp-reverser-int.ll
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll
index b235990ab5dd0..4bbd10df5254f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-float.ll
@@ -1,5 +1,92 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v,+zvfh -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x half> @test_vp_reverse_nxv1f16_masked(<vscale x 1 x half> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 1 x half> @llvm.experimental.vp.reverse.nxv1f16(<vscale x 1 x half> %src, <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x half> %dst
+}
+
+define <vscale x 1 x half> @test_vp_reverse_nxv1f16(<vscale x 1 x half> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 1 x half> @llvm.experimental.vp.reverse.nxv1f16(<vscale x 1 x half> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 1 x half> %dst
+}
+
+define <vscale x 1 x float> @test_vp_reverse_nxv1f32_masked(<vscale x 1 x float> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1f32_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 1 x float> @llvm.experimental.vp.reverse.nxv1f32(<vscale x 1 x float> %src, <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x float> %dst
+}
+
+define <vscale x 1 x float> @test_vp_reverse_nxv1f32(<vscale x 1 x float> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1f32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 1 x float> @llvm.experimental.vp.reverse.nxv1f32(<vscale x 1 x float> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 1 x float> %dst
+}
+
+define <vscale x 2 x half> @test_vp_reverse_nxv2f16_masked(<vscale x 2 x half> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv2f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 2 x half> @llvm.experimental.vp.reverse.nxv2f16(<vscale x 2 x half> %src, <vscale x 2 x i1> %mask, i32 %evl)
+ ret <vscale x 2 x half> %dst
+}
+
+define <vscale x 2 x half> @test_vp_reverse_nxv2f16(<vscale x 2 x half> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv2f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 2 x half> @llvm.experimental.vp.reverse.nxv2f16(<vscale x 2 x half> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 2 x half> %dst
+}
define <vscale x 1 x double> @test_vp_reverse_nxv1f64_masked(<vscale x 1 x double> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1f64_masked:
@@ -59,6 +146,35 @@ define <vscale x 2 x float> @test_vp_reverse_nxv2f32(<vscale x 2 x float> %src,
ret <vscale x 2 x float> %dst
}
+define <vscale x 4 x half> @test_vp_reverse_nxv4f16_masked(<vscale x 4 x half> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv4f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 4 x half> @llvm.experimental.vp.reverse.nxv4f16(<vscale x 4 x half> %src, <vscale x 4 x i1> %mask, i32 %evl)
+ ret <vscale x 4 x half> %dst
+}
+
+define <vscale x 4 x half> @test_vp_reverse_nxv4f16(<vscale x 4 x half> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv4f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv.v.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 4 x half> @llvm.experimental.vp.reverse.nxv4f16(<vscale x 4 x half> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 4 x half> %dst
+}
+
define <vscale x 2 x double> @test_vp_reverse_nxv2f64_masked(<vscale x 2 x double> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2f64_masked:
; CHECK: # %bb.0:
@@ -117,6 +233,35 @@ define <vscale x 4 x float> @test_vp_reverse_nxv4f32(<vscale x 4 x float> %src,
ret <vscale x 4 x float> %dst
}
+define <vscale x 8 x half> @test_vp_reverse_nxv8f16_masked(<vscale x 8 x half> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv8f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vid.v v10, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v12, v10, a0, v0.t
+; CHECK-NEXT: vrgather.vv v10, v8, v12, v0.t
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+ %dst = call <vscale x 8 x half> @llvm.experimental.vp.reverse.nxv8f16(<vscale x 8 x half> %src, <vscale x 8 x i1> %mask, i32 %evl)
+ ret <vscale x 8 x half> %dst
+}
+
+define <vscale x 8 x half> @test_vp_reverse_nxv8f16(<vscale x 8 x half> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv8f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: vrsub.vx v12, v10, a1
+; CHECK-NEXT: vrgather.vv v10, v8, v12
+; CHECK-NEXT: vmv.v.v v8, v10
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 8 x half> @llvm.experimental.vp.reverse.nxv8f16(<vscale x 8 x half> %src, <vscale x 8 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 8 x half> %dst
+}
+
define <vscale x 4 x double> @test_vp_reverse_nxv4f64_masked(<vscale x 4 x double> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4f64_masked:
; CHECK: # %bb.0:
@@ -175,6 +320,35 @@ define <vscale x 8 x float> @test_vp_reverse_nxv8f32(<vscale x 8 x float> %src,
ret <vscale x 8 x float> %dst
}
+define <vscale x 16 x half> @test_vp_reverse_nxv16f16_masked(<vscale x 16 x half> %src, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv16f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vid.v v12, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v16, v12, a0, v0.t
+; CHECK-NEXT: vrgather.vv v12, v8, v16, v0.t
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+ %dst = call <vscale x 16 x half> @llvm.experimental.vp.reverse.nxv16f16(<vscale x 16 x half> %src, <vscale x 16 x i1> %mask, i32 %evl)
+ ret <vscale x 16 x half> %dst
+}
+
+define <vscale x 16 x half> @test_vp_reverse_nxv16f16(<vscale x 16 x half> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv16f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT: vid.v v12
+; CHECK-NEXT: vrsub.vx v16, v12, a1
+; CHECK-NEXT: vrgather.vv v12, v8, v16
+; CHECK-NEXT: vmv.v.v v8, v12
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 16 x half> @llvm.experimental.vp.reverse.nxv16f16(<vscale x 16 x half> %src, <vscale x 16 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 16 x half> %dst
+}
+
define <vscale x 8 x double> @test_vp_reverse_nxv8f64_masked(<vscale x 8 x double> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8f64_masked:
; CHECK: # %bb.0:
@@ -233,18 +407,31 @@ define <vscale x 16 x float> @test_vp_reverse_nxv16f32(<vscale x 16 x float> %sr
ret <vscale x 16 x float> %dst
}
-; LMUL = 1
-declare <vscale x 1 x double> @llvm.experimental.vp.reverse.nxv1f64(<vscale x 1 x double>,<vscale x 1 x i1>,i32)
-declare <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float>,<vscale x 2 x i1>,i32)
-
-; LMUL = 2
-declare <vscale x 2 x double> @llvm.experimental.vp.reverse.nxv2f64(<vscale x 2 x double>,<vscale x 2 x i1>,i32)
-declare <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float>,<vscale x 4 x i1>,i32)
+define <vscale x 32 x half> @test_vp_reverse_nxv32f16_masked(<vscale x 32 x half> %src, <vscale x 32 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv32f16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vid.v v16, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v24, v16, a0, v0.t
+; CHECK-NEXT: vrgather.vv v16, v8, v24, v0.t
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
+ %dst = call <vscale x 32 x half> @llvm.experimental.vp.reverse.nxv32f16(<vscale x 32 x half> %src, <vscale x 32 x i1> %mask, i32 %evl)
+ ret <vscale x 32 x half> %dst
+}
-; LMUL = 4
-declare <vscale x 4 x double> @llvm.experimental.vp.reverse.nxv4f64(<vscale x 4 x double>,<vscale x 4 x i1>,i32)
-declare <vscale x 8 x float> @llvm.experimental.vp.reverse.nxv8f32(<vscale x 8 x float>,<vscale x 8 x i1>,i32)
+define <vscale x 32 x half> @test_vp_reverse_nxv32f16(<vscale x 32 x half> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv32f16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
+; CHECK-NEXT: vid.v v16
+; CHECK-NEXT: vrsub.vx v24, v16, a1
+; CHECK-NEXT: vrgather.vv v16, v8, v24
+; CHECK-NEXT: vmv.v.v v8, v16
+; CHECK-NEXT: ret
-; LMUL = 8
-declare <vscale x 8 x double> @llvm.experimental.vp.reverse.nxv8f64(<vscale x 8 x double>,<vscale x 8 x i1>,i32)
-declare <vscale x 16 x float> @llvm.experimental.vp.reverse.nxv16f32(<vscale x 16 x float>,<vscale x 16 x i1>,i32)
+ %dst = call <vscale x 32 x half> @llvm.experimental.vp.reverse.nxv32f16(<vscale x 32 x half> %src, <vscale x 32 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 32 x half> %dst
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
index 507f5154cf1ac..c96a7d774a5d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
@@ -1,6 +1,186 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+v -verify-machineinstrs < %s | FileCheck %s
+define <vscale x 1 x i8> @test_vp_reverse_nxv1i8_masked(<vscale x 1 x i8> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1i8_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 1 x i8> @llvm.experimental.vp.reverse.nxv1i8(<vscale x 1 x i8> %src, <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x i8> %dst
+}
+
+define <vscale x 1 x i8> @test_vp_reverse_nxv1i8(<vscale x 1 x i8> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 1 x i8> @llvm.experimental.vp.reverse.nxv1i8(<vscale x 1 x i8> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 1 x i8> %dst
+}
+
+define <vscale x 1 x i16> @test_vp_reverse_nxv1i16_masked(<vscale x 1 x i16> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1i16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 1 x i16> @llvm.experimental.vp.reverse.nxv1i16(<vscale x 1 x i16> %src, <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x i16> %dst
+}
+
+define <vscale x 1 x i16> @test_vp_reverse_nxv1i16(<vscale x 1 x i16> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 1 x i16> @llvm.experimental.vp.reverse.nxv1i16(<vscale x 1 x i16> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 1 x i16> %dst
+}
+
+define <vscale x 2 x i8> @test_vp_reverse_nxv2i8_masked(<vscale x 2 x i8> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv2i8_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 2 x i8> @llvm.experimental.vp.reverse.nxv2i8(<vscale x 2 x i8> %src, <vscale x 2 x i1> %mask, i32 %evl)
+ ret <vscale x 2 x i8> %dst
+}
+
+define <vscale x 2 x i8> @test_vp_reverse_nxv2i8(<vscale x 2 x i8> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv2i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 2 x i8> @llvm.experimental.vp.reverse.nxv2i8(<vscale x 2 x i8> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 2 x i8> %dst
+}
+
+define <vscale x 1 x i32> @test_vp_reverse_nxv1i32_masked(<vscale x 1 x i32> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1i32_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 1 x i32> @llvm.experimental.vp.reverse.nxv1i32(<vscale x 1 x i32> %src, <vscale x 1 x i1> %mask, i32 %evl)
+ ret <vscale x 1 x i32> %dst
+}
+
+define <vscale x 1 x i32> @test_vp_reverse_nxv1i32(<vscale x 1 x i32> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv1i32:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 1 x i32> @llvm.experimental.vp.reverse.nxv1i32(<vscale x 1 x i32> %src, <vscale x 1 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 1 x i32> %dst
+}
+
+define <vscale x 2 x i16> @test_vp_reverse_nxv2i16_masked(<vscale x 2 x i16> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv2i16_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vrgather.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 2 x i16> @llvm.experimental.vp.reverse.nxv2i16(<vscale x 2 x i16> %src, <vscale x 2 x i1> %mask, i32 %evl)
+ ret <vscale x 2 x i16> %dst
+}
+
+define <vscale x 2 x i16> @test_vp_reverse_nxv2i16(<vscale x 2 x i16> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv2i16:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vrgather.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 2 x i16> @llvm.experimental.vp.reverse.nxv2i16(<vscale x 2 x i16> %src, <vscale x 2 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 2 x i16> %dst
+}
+
+define <vscale x 4 x i8> @test_vp_reverse_nxv4i8_masked(<vscale x 4 x i8> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv4i8_masked:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vid.v v9, v0.t
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vrsub.vx v10, v9, a0, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v9, v8, v10, v0.t
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+ %dst = call <vscale x 4 x i8> @llvm.experimental.vp.reverse.nxv4i8(<vscale x 4 x i8> %src, <vscale x 4 x i1> %mask, i32 %evl)
+ ret <vscale x 4 x i8> %dst
+}
+
+define <vscale x 4 x i8> @test_vp_reverse_nxv4i8(<vscale x 4 x i8> %src, i32 zeroext %evl) {
+; CHECK-LABEL: test_vp_reverse_nxv4i8:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a1, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT: vid.v v9
+; CHECK-NEXT: vrsub.vx v10, v9, a1
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vrgatherei16.vv v9, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v9
+; CHECK-NEXT: ret
+
+ %dst = call <vscale x 4 x i8> @llvm.experimental.vp.reverse.nxv4i8(<vscale x 4 x i8> %src, <vscale x 4 x i1> splat (i1 1), i32 %evl)
+ ret <vscale x 4 x i8> %dst
+}
+
define <vscale x 1 x i64> @test_vp_reverse_nxv1i64_masked(<vscale x 1 x i64> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1i64_masked:
; CHECK: # %bb.0:
@@ -501,10 +681,10 @@ define <vscale x 128 x i8> @test_vp_reverse_nxv128i8(<vscale x 128 x i8> %src, i
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: mv a1, a0
-; CHECK-NEXT: bltu a0, a2, .LBB32_2
+; CHECK-NEXT: bltu a0, a2, .LBB44_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
-; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: .LBB44_2:
; CHECK-NEXT: addi sp, sp, -80
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
@@ -547,29 +727,3 @@ define <vscale x 128 x i8> @test_vp_reverse_nxv128i8(<vscale x 128 x i8> %src, i
%dst = call <vscale x 128 x i8> @llvm.experimental.vp.reverse.nxv128i8(<vscale x 128 x i8> %src, <vscale x 128 x i1> splat (i1 1), i32 %evl)
ret <vscale x 128 x i8> %dst
}
-
-; LMUL = 1
-declare <vscale x 1 x i64> @llvm.experimental.vp.reverse.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i1>,i32)
-declare <vscale x 2 x i32> @llvm.experimental.vp.reverse.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i1>,i32)
-declare <vscale x 4 x i16> @llvm.experimental.vp.reverse.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1>,i32)
-declare <vscale x 8 x i8> @llvm.experimental.vp.reverse.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i1>,i32)
-
-; LMUL = 2
-declare <vscale x 2 x i64> @llvm.experimental.vp.reverse.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i1>,i32)
-declare <vscale x 4 x i32> @llvm.experimental.vp.reverse.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i1>,i32)
-declare <vscale x 8 x i16> @llvm.experimental.vp.reverse.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1>,i32)
-declare <vscale x 16 x i8> @llvm.experimental.vp.reverse.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i1>,i32)
-
-; LMUL = 4
-declare <vscale x 4 x i64> @llvm.experimental.vp.reverse.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i1>,i32)
-declare <vscale x 8 x i32> @llvm.experimental.vp.reverse.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i1>,i32)
-declare <vscale x 16 x i16> @llvm.experimental.vp.reverse.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x i1>,i32)
-declare <vscale x 32 x i8> @llvm.experimental.vp.reverse.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i1>,i32)
-
-; LMUL = 8
-declare <vscale x 8 x i64> @llvm.experimental.vp.reverse.nxv8i64(<vscale x 8 x i64>,<vscale x 8 x i1>,i32)
-declare <vscale x 16 x i32> @llvm.experimental.vp.reverse.nxv16i32(<vscale x 16 x i32>,<vscale x 16 x i1>,i32)
-declare <vscale x 32 x i16> @llvm.experimental.vp.reverse.nxv32i16(<vscale x 32 x i16>,<vscale x 32 x i1>,i32)
-declare <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8>,<vscale x 64 x i1>,i32)
-
-declare <vscale x 128 x i8> @llvm.experimental.vp.reverse.nxv128i8(<vscale x 128 x i8>,<vscale x 128 x i1>,i32)