[llvm] 3268ddd - [RISCV] Add test for extracting the first element from a mask vector.
via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 7 23:44:10 PST 2022
Author: jacquesguan
Date: 2022-12-08T15:44:02+08:00
New Revision: 3268ddd2b60d6629726135992889658eee48fe3b
URL: https://github.com/llvm/llvm-project/commit/3268ddd2b60d6629726135992889658eee48fe3b
DIFF: https://github.com/llvm/llvm-project/commit/3268ddd2b60d6629726135992889658eee48fe3b.diff
LOG: [RISCV] Add test for extracting the first element from a mask vector.
Reviewed By: reames
Differential Revision: https://reviews.llvm.org/D139510
Added:
Modified:
llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index f1fd3343cfdd6..dc3bc4b27abeb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -213,3 +213,115 @@ define i1 @extractelt_nxv128i1(<vscale x 128 x i8>* %x, i64 %idx) nounwind {
%c = extractelement <vscale x 128 x i1> %b, i64 %idx
ret i1 %c
}
+
+define i1 @extractelt_nxv1i1_idx0(<vscale x 1 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_nxv1i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <vscale x 1 x i8>, <vscale x 1 x i8>* %x
+ %b = icmp eq <vscale x 1 x i8> %a, zeroinitializer
+ %c = extractelement <vscale x 1 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_nxv2i1_idx0(<vscale x 2 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_nxv2i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <vscale x 2 x i8>, <vscale x 2 x i8>* %x
+ %b = icmp eq <vscale x 2 x i8> %a, zeroinitializer
+ %c = extractelement <vscale x 2 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_nxv4i1_idx0(<vscale x 4 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_nxv4i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <vscale x 4 x i8>, <vscale x 4 x i8>* %x
+ %b = icmp eq <vscale x 4 x i8> %a, zeroinitializer
+ %c = extractelement <vscale x 4 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_nxv8i1_idx0(<vscale x 8 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_nxv8i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl1r.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <vscale x 8 x i8>, <vscale x 8 x i8>* %x
+ %b = icmp eq <vscale x 8 x i8> %a, zeroinitializer
+ %c = extractelement <vscale x 8 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_nxv16i1_idx0(<vscale x 16 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_nxv16i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl2r.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <vscale x 16 x i8>, <vscale x 16 x i8>* %x
+ %b = icmp eq <vscale x 16 x i8> %a, zeroinitializer
+ %c = extractelement <vscale x 16 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_nxv32i1_idx0(<vscale x 32 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_nxv32i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl4r.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <vscale x 32 x i8>, <vscale x 32 x i8>* %x
+ %b = icmp eq <vscale x 32 x i8> %a, zeroinitializer
+ %c = extractelement <vscale x 32 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_nxv64i1_idx0(<vscale x 64 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_nxv64i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vl8r.v v8, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <vscale x 64 x i8>, <vscale x 64 x i8>* %x
+ %b = icmp eq <vscale x 64 x i8> %a, zeroinitializer
+ %c = extractelement <vscale x 64 x i1> %b, i64 0
+ ret i1 %c
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index c80b24c6f686d..e8982ce3d02d1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -447,3 +447,249 @@ define i1 @extractelt_v256i1(<256 x i8>* %x, i64 %idx) nounwind {
%c = extractelement <256 x i1> %b, i64 %idx
ret i1 %c
}
+
+define i1 @extractelt_v1i1_idx0(<1 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_v1i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <1 x i8>, <1 x i8>* %x
+ %b = icmp eq <1 x i8> %a, zeroinitializer
+ %c = extractelement <1 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v2i1_idx0(<2 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_v2i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <2 x i8>, <2 x i8>* %x
+ %b = icmp eq <2 x i8> %a, zeroinitializer
+ %c = extractelement <2 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v4i1_idx0(<4 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_v4i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v0, v8, 0
+; CHECK-NEXT: vmv.v.i v8, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: ret
+ %a = load <4 x i8>, <4 x i8>* %x
+ %b = icmp eq <4 x i8> %a, zeroinitializer
+ %c = extractelement <4 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v8i1_idx0(<8 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_v8i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v8, v8, 0
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: ret
+ %a = load <8 x i8>, <8 x i8>* %x
+ %b = icmp eq <8 x i8> %a, zeroinitializer
+ %c = extractelement <8 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v16i1_idx0(<16 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_v16i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v8, v8, 0
+; CHECK-NEXT: vsetivli zero, 0, e16, mf4, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v8
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: ret
+ %a = load <16 x i8>, <16 x i8>* %x
+ %b = icmp eq <16 x i8> %a, zeroinitializer
+ %c = extractelement <16 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v32i1_idx0(<32 x i8>* %x) nounwind {
+; CHECK-LABEL: extractelt_v32i1_idx0:
+; CHECK: # %bb.0:
+; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vmseq.vi v10, v8, 0
+; CHECK-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.x.s a0, v10
+; CHECK-NEXT: andi a0, a0, 1
+; CHECK-NEXT: ret
+ %a = load <32 x i8>, <32 x i8>* %x
+ %b = icmp eq <32 x i8> %a, zeroinitializer
+ %c = extractelement <32 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v64i1_idx0(<64 x i8>* %x) nounwind {
+; RV32-LABEL: extractelt_v64i1_idx0:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 64
+; RV32-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32-NEXT: vle8.v v8, (a0)
+; RV32-NEXT: vmseq.vi v12, v8, 0
+; RV32-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
+; RV32-NEXT: vmv.x.s a0, v12
+; RV32-NEXT: andi a0, a0, 1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: extractelt_v64i1_idx0:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 64
+; RV64-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64-NEXT: vle8.v v8, (a0)
+; RV64-NEXT: vmseq.vi v12, v8, 0
+; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v12
+; RV64-NEXT: andi a0, a0, 1
+; RV64-NEXT: ret
+;
+; RV32ZBS-LABEL: extractelt_v64i1_idx0:
+; RV32ZBS: # %bb.0:
+; RV32ZBS-NEXT: li a1, 64
+; RV32ZBS-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV32ZBS-NEXT: vle8.v v8, (a0)
+; RV32ZBS-NEXT: vmseq.vi v12, v8, 0
+; RV32ZBS-NEXT: vsetivli zero, 0, e32, mf2, ta, ma
+; RV32ZBS-NEXT: vmv.x.s a0, v12
+; RV32ZBS-NEXT: andi a0, a0, 1
+; RV32ZBS-NEXT: ret
+;
+; RV64ZBS-LABEL: extractelt_v64i1_idx0:
+; RV64ZBS: # %bb.0:
+; RV64ZBS-NEXT: li a1, 64
+; RV64ZBS-NEXT: vsetvli zero, a1, e8, m4, ta, ma
+; RV64ZBS-NEXT: vle8.v v8, (a0)
+; RV64ZBS-NEXT: vmseq.vi v12, v8, 0
+; RV64ZBS-NEXT: vsetivli zero, 0, e64, m1, ta, ma
+; RV64ZBS-NEXT: vmv.x.s a0, v12
+; RV64ZBS-NEXT: andi a0, a0, 1
+; RV64ZBS-NEXT: ret
+ %a = load <64 x i8>, <64 x i8>* %x
+ %b = icmp eq <64 x i8> %a, zeroinitializer
+ %c = extractelement <64 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v128i1_idx0(<128 x i8>* %x) nounwind {
+; RV32-LABEL: extractelt_v128i1_idx0:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 128
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a0)
+; RV32-NEXT: vmseq.vi v16, v8, 0
+; RV32-NEXT: vsetivli zero, 0, e32, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v16
+; RV32-NEXT: andi a0, a0, 1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: extractelt_v128i1_idx0:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 128
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v8, (a0)
+; RV64-NEXT: vmseq.vi v16, v8, 0
+; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v16
+; RV64-NEXT: andi a0, a0, 1
+; RV64-NEXT: ret
+;
+; RV32ZBS-LABEL: extractelt_v128i1_idx0:
+; RV32ZBS: # %bb.0:
+; RV32ZBS-NEXT: li a1, 128
+; RV32ZBS-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32ZBS-NEXT: vle8.v v8, (a0)
+; RV32ZBS-NEXT: vmseq.vi v16, v8, 0
+; RV32ZBS-NEXT: vsetivli zero, 0, e32, m1, ta, ma
+; RV32ZBS-NEXT: vmv.x.s a0, v16
+; RV32ZBS-NEXT: andi a0, a0, 1
+; RV32ZBS-NEXT: ret
+;
+; RV64ZBS-LABEL: extractelt_v128i1_idx0:
+; RV64ZBS: # %bb.0:
+; RV64ZBS-NEXT: li a1, 128
+; RV64ZBS-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64ZBS-NEXT: vle8.v v8, (a0)
+; RV64ZBS-NEXT: vmseq.vi v16, v8, 0
+; RV64ZBS-NEXT: vsetivli zero, 0, e64, m1, ta, ma
+; RV64ZBS-NEXT: vmv.x.s a0, v16
+; RV64ZBS-NEXT: andi a0, a0, 1
+; RV64ZBS-NEXT: ret
+ %a = load <128 x i8>, <128 x i8>* %x
+ %b = icmp eq <128 x i8> %a, zeroinitializer
+ %c = extractelement <128 x i1> %b, i64 0
+ ret i1 %c
+}
+
+define i1 @extractelt_v256i1_idx0(<256 x i8>* %x) nounwind {
+; RV32-LABEL: extractelt_v256i1_idx0:
+; RV32: # %bb.0:
+; RV32-NEXT: li a1, 128
+; RV32-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32-NEXT: vle8.v v8, (a0)
+; RV32-NEXT: vmseq.vi v16, v8, 0
+; RV32-NEXT: vsetivli zero, 0, e32, m1, ta, ma
+; RV32-NEXT: vmv.x.s a0, v16
+; RV32-NEXT: andi a0, a0, 1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: extractelt_v256i1_idx0:
+; RV64: # %bb.0:
+; RV64-NEXT: li a1, 128
+; RV64-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64-NEXT: vle8.v v8, (a0)
+; RV64-NEXT: vmseq.vi v16, v8, 0
+; RV64-NEXT: vsetivli zero, 0, e64, m1, ta, ma
+; RV64-NEXT: vmv.x.s a0, v16
+; RV64-NEXT: andi a0, a0, 1
+; RV64-NEXT: ret
+;
+; RV32ZBS-LABEL: extractelt_v256i1_idx0:
+; RV32ZBS: # %bb.0:
+; RV32ZBS-NEXT: li a1, 128
+; RV32ZBS-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV32ZBS-NEXT: vle8.v v8, (a0)
+; RV32ZBS-NEXT: vmseq.vi v16, v8, 0
+; RV32ZBS-NEXT: vsetivli zero, 0, e32, m1, ta, ma
+; RV32ZBS-NEXT: vmv.x.s a0, v16
+; RV32ZBS-NEXT: andi a0, a0, 1
+; RV32ZBS-NEXT: ret
+;
+; RV64ZBS-LABEL: extractelt_v256i1_idx0:
+; RV64ZBS: # %bb.0:
+; RV64ZBS-NEXT: li a1, 128
+; RV64ZBS-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; RV64ZBS-NEXT: vle8.v v8, (a0)
+; RV64ZBS-NEXT: vmseq.vi v16, v8, 0
+; RV64ZBS-NEXT: vsetivli zero, 0, e64, m1, ta, ma
+; RV64ZBS-NEXT: vmv.x.s a0, v16
+; RV64ZBS-NEXT: andi a0, a0, 1
+; RV64ZBS-NEXT: ret
+ %a = load <256 x i8>, <256 x i8>* %x
+ %b = icmp eq <256 x i8> %a, zeroinitializer
+ %c = extractelement <256 x i1> %b, i64 0
+ ret i1 %c
+}
More information about the llvm-commits
mailing list