[llvm] 9cfb28d - [RISCV] Change VECTOR_SPLICE mask operation from expand to promote
Lian Wang via llvm-commits
llvm-commits at lists.llvm.org
Thu Jul 7 23:26:54 PDT 2022
Author: Lian Wang
Date: 2022-07-08T06:20:22Z
New Revision: 9cfb28d672a3021c65f0a1462991d53c4e68deb6
URL: https://github.com/llvm/llvm-project/commit/9cfb28d672a3021c65f0a1462991d53c4e68deb6
DIFF: https://github.com/llvm/llvm-project/commit/9cfb28d672a3021c65f0a1462991d53c4e68deb6.diff
LOG: [RISCV] Change VECTOR_SPLICE mask operation from expand to promote
Reviewed By: craig.topper
Differential Revision: https://reviews.llvm.org/D128717
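The mechanics of the promotion are visible in the test diff below: each i1 mask operand is first materialized as an i8 vector of 0/1 values (vmerge.vim), the splice itself runs on the promoted i8 vectors (vslidedown/vslideup), and the result is narrowed back to a mask (vand.vi 1 followed by vmsne.vi 0). This replaces the old expand lowering, which spilled both masks to a stack slot (vsm.v/vlm.v plus frame setup). For illustration only, here is a minimal scalar C++ sketch of the same transformation, assuming a fixed-width std::vector in place of a scalable vector and a hypothetical spliceMask helper that is not part of LLVM:

#include <cstddef>
#include <cstdint>
#include <vector>

// Scalar model of the promoted i1 VECTOR_SPLICE lowering: widen the mask
// to i8, splice the widened vectors, then truncate back to i1.
// Per the semantics of llvm.experimental.vector.splice, the result is n
// consecutive elements of concat(a, b) starting at index `offset` when
// offset >= 0, or at index n + offset when offset < 0.
static std::vector<bool> spliceMask(const std::vector<bool> &a,
                                    const std::vector<bool> &b, int offset) {
  size_t n = a.size(); // both operands have the same element count
  // Promote: each i1 becomes an i8 holding 0 or 1 (vmerge.vim in the asm).
  std::vector<uint8_t> wa(n), wb(n);
  for (size_t i = 0; i < n; ++i) {
    wa[i] = a[i];
    wb[i] = b[i];
  }
  // Splice the promoted vectors (vslidedown + vslideup in the asm).
  size_t start = offset >= 0 ? size_t(offset) : n - size_t(-offset);
  std::vector<uint8_t> spliced(n);
  for (size_t i = 0; i < n; ++i)
    spliced[i] = start + i < n ? wa[start + i] : wb[start + i - n];
  // Truncate back to a mask (vand.vi 1, then vmsne.vi 0).
  std::vector<bool> res(n);
  for (size_t i = 0; i < n; ++i)
    res[i] = (spliced[i] & 1) != 0;
  return res;
}

int main() {
  std::vector<bool> a{true, false, true, true}, b{false, true, false, false};
  // offset -1: the last element of a followed by the first three of b.
  std::vector<bool> r = spliceMask(a, b, -1); // {true, false, true, false}
  return (r[0] && !r[1] && r[2] && !r[3]) ? 0 : 1;
}

The register-only sequence this models is what the updated CHECK lines capture: two vector stores, a vector mask load, and the stack adjustment of the old expand path are gone.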
Added:
Modified:
llvm/lib/Target/RISCV/RISCVISelLowering.cpp
llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index b68ebc15a9af..c66af194535d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -526,6 +526,10 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
{ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNCATE, ISD::VP_SETCC}, VT,
Custom);
setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);
+
+ setOperationPromotedToType(
+ ISD::VECTOR_SPLICE, VT,
+ MVT::getVectorVT(MVT::i8, VT.getVectorElementCount()));
}
for (MVT VT : IntVecVTs) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index d05dbe2f0669..32210f5960ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -9,196 +9,351 @@ declare <vscale x 1 x i1> @llvm.experimental.vector.splice.nxv1i1(<vscale x 1 x
define <vscale x 1 x i1> @splice_nxv1i1_offset_negone(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv1i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
-; CHECK-NEXT: vsm.v v0, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 3
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT: vslidedown.vx v10, v10, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu
+; CHECK-NEXT: vslideup.vi v10, v8, 1
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%res = call <vscale x 1 x i1> @llvm.experimental.vector.splice.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i32 -1)
ret <vscale x 1 x i1> %res
}
+define <vscale x 1 x i1> @splice_nxv1i1_offset_max(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv1i1_offset_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 3
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vslidedown.vi v10, v10, 1
+; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, tu, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %res = call <vscale x 1 x i1> @llvm.experimental.vector.splice.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, i32 1)
+ ret <vscale x 1 x i1> %res
+}
+
declare <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i1> @splice_nxv2i1_offset_negone(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv2i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
-; CHECK-NEXT: vsm.v v0, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 3
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, mu
+; CHECK-NEXT: vslidedown.vx v10, v10, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu
+; CHECK-NEXT: vslideup.vi v10, v8, 1
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 -1)
ret <vscale x 2 x i1> %res
}
+define <vscale x 2 x i1> @splice_nxv2i1_offset_max(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv2i1_offset_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 2
+; CHECK-NEXT: addi a0, a0, -3
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vslidedown.vi v10, v10, 3
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, tu, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %res = call <vscale x 2 x i1> @llvm.experimental.vector.splice.nxv2i1(<vscale x 2 x i1> %a, <vscale x 2 x i1> %b, i32 3)
+ ret <vscale x 2 x i1> %res
+}
+
declare <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32)
define <vscale x 4 x i1> @splice_nxv4i1_offset_negone(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv4i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vsm.v v0, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 3
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, mu
+; CHECK-NEXT: vslidedown.vx v10, v10, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu
+; CHECK-NEXT: vslideup.vi v10, v8, 1
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 -1)
ret <vscale x 4 x i1> %res
}
+define <vscale x 4 x i1> @splice_nxv4i1_offset_max(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv4i1_offset_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: srli a0, a0, 1
+; CHECK-NEXT: addi a0, a0, -7
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vslidedown.vi v10, v10, 7
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %res = call <vscale x 4 x i1> @llvm.experimental.vector.splice.nxv4i1(<vscale x 4 x i1> %a, <vscale x 4 x i1> %b, i32 7)
+ ret <vscale x 4 x i1> %res
+}
+
declare <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32)
define <vscale x 8 x i1> @splice_nxv8i1_offset_negone(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv8i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
-; CHECK-NEXT: vsm.v v0, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 3
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vx v10, v10, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu
+; CHECK-NEXT: vslideup.vi v10, v8, 1
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 -1)
ret <vscale x 8 x i1> %res
}
+define <vscale x 8 x i1> @splice_nxv8i1_offset_max(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv8i1_offset_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v9, 0
+; CHECK-NEXT: vmerge.vim v10, v9, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: addi a0, a0, -15
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v10, v10, 15
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, tu, mu
+; CHECK-NEXT: vslideup.vx v10, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vand.vi v8, v10, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %res = call <vscale x 8 x i1> @llvm.experimental.vector.splice.nxv8i1(<vscale x 8 x i1> %a, <vscale x 8 x i1> %b, i32 15)
+ ret <vscale x 8 x i1> %res
+}
+
declare <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32)
define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv16i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
-; CHECK-NEXT: vsm.v v0, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 2
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, mu
+; CHECK-NEXT: vslidedown.vx v12, v12, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu
+; CHECK-NEXT: vslideup.vi v12, v8, 1
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 -1)
ret <vscale x 16 x i1> %res
}
+define <vscale x 16 x i1> @splice_nxv16i1_offset_max(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv16i1_offset_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, mu
+; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: addi a0, a0, -31
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vslidedown.vi v12, v12, 31
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, tu, mu
+; CHECK-NEXT: vslideup.vx v12, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, mu
+; CHECK-NEXT: vand.vi v8, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %res = call <vscale x 16 x i1> @llvm.experimental.vector.splice.nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b, i32 31)
+ ret <vscale x 16 x i1> %res
+}
+
declare <vscale x 32 x i1> @llvm.experimental.vector.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv32i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
-; CHECK-NEXT: vsm.v v0, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a1, a1, 1
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, mu
+; CHECK-NEXT: vslidedown.vx v16, v16, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, mu
+; CHECK-NEXT: vslideup.vi v16, v8, 1
+; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%res = call <vscale x 32 x i1> @llvm.experimental.vector.splice.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, i32 -1)
ret <vscale x 32 x i1> %res
}
+define <vscale x 32 x i1> @splice_nxv32i1_offset_max(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv32i1_offset_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, mu
+; CHECK-NEXT: vmv.v.i v12, 0
+; CHECK-NEXT: vmerge.vim v16, v12, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 2
+; CHECK-NEXT: addi a0, a0, -63
+; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vslidedown.vx v16, v16, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m4, tu, mu
+; CHECK-NEXT: vslideup.vx v16, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, mu
+; CHECK-NEXT: vand.vi v8, v16, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %res = call <vscale x 32 x i1> @llvm.experimental.vector.splice.nxv32i1(<vscale x 32 x i1> %a, <vscale x 32 x i1> %b, i32 63)
+ ret <vscale x 32 x i1> %res
+}
+
declare <vscale x 64 x i1> @llvm.experimental.vector.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32)
define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
; CHECK-LABEL: splice_nxv64i1_offset_negone:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vim v24, v16, 1, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
-; CHECK-NEXT: vsm.v v0, (a0)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: vsm.v v8, (a0)
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 1
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
+; CHECK-NEXT: vslidedown.vx v24, v24, a0
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, mu
+; CHECK-NEXT: vslideup.vi v24, v8, 1
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu
+; CHECK-NEXT: vand.vi v8, v24, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%res = call <vscale x 64 x i1> @llvm.experimental.vector.splice.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, i32 -1)
ret <vscale x 64 x i1> %res
}
+define <vscale x 64 x i1> @splice_nxv64i1_offset_max(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) #0 {
+; CHECK-LABEL: splice_nxv64i1_offset_max:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, mu
+; CHECK-NEXT: vmv.v.i v16, 0
+; CHECK-NEXT: vmerge.vim v24, v16, 1, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: addi a0, a0, -127
+; CHECK-NEXT: li a1, 127
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vslidedown.vx v24, v24, a1
+; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, tu, mu
+; CHECK-NEXT: vslideup.vx v24, v8, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, m8, ta, mu
+; CHECK-NEXT: vand.vi v8, v24, 1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
+; CHECK-NEXT: ret
+ %res = call <vscale x 64 x i1> @llvm.experimental.vector.splice.nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b, i32 127)
+ ret <vscale x 64 x i1> %res
+}
+
declare <vscale x 1 x i8> @llvm.experimental.vector.splice.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i32)
define <vscale x 1 x i8> @splice_nxv1i8_offset_zero(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b) #0 {