[llvm] 1f342f9 - [RISCV] Add coverage for recently added vectorization intrinsics

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 20 09:48:41 PST 2024


Author: Philip Reames
Date: 2024-11-20T09:48:32-08:00
New Revision: 1f342f94b258bbf31efa2a6dc458229832fb5c6f

URL: https://github.com/llvm/llvm-project/commit/1f342f94b258bbf31efa2a6dc458229832fb5c6f
DIFF: https://github.com/llvm/llvm-project/commit/1f342f94b258bbf31efa2a6dc458229832fb5c6f.diff

LOG: [RISCV] Add coverage for recently added vectorization intrinsics

vector.match was added in e52238.
extract.last.active was added in ed5aad.

We have opportunities for better codegen in both, but neither is
terrible out of the box.
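
For reference, a minimal sketch of how the two intrinsics are invoked in IR,
mirroring the call shapes exercised by the tests below (names of the local
values are illustrative only):

  ; Lanes of the result are set where a masked element of %op1 equals any
  ; element of the fixed search vector %op2.
  %m = call <16 x i1> @llvm.experimental.vector.match(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask)

  ; Returns the %data element from the highest-numbered active lane of
  ; %mask, or %passthru when no lane is active.
  %e = call i8 @llvm.experimental.vector.extract.last.active.v16i8(<16 x i8> %data, <16 x i1> %mask, i8 %passthru)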

Added: 
    llvm/test/CodeGen/RISCV/rvv/intrinsic-vector-match.ll
    llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/intrinsic-vector-match.ll b/llvm/test/CodeGen/RISCV/rvv/intrinsic-vector-match.ll
new file mode 100644
index 00000000000000..e70dcd16d02cd2
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/intrinsic-vector-match.ll
@@ -0,0 +1,1387 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfh -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfh -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV64
+
+define <vscale x 16 x i1> @match_nxv16i8_v1i8(<vscale x 16 x i8> %op1, <1 x i8> %op2, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: match_nxv16i8_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmand.mm v0, v10, v0
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i1> @llvm.experimental.vector.match(<vscale x 16 x i8> %op1, <1 x i8> %op2, <vscale x 16 x i1> %mask)
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 16 x i1> @match_nxv16i8_v2i8(<vscale x 16 x i8> %op1, <2 x i8> %op2, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: match_nxv16i8_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vslidedown.vi v10, v10, 1
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmor.mm v8, v11, v10
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i1> @llvm.experimental.vector.match(<vscale x 16 x i8> %op1, <2 x i8> %op2, <vscale x 16 x i1> %mask)
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 16 x i1> @match_nxv16i8_v4i8(<vscale x 16 x i8> %op1, <4 x i8> %op2, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: match_nxv16i8_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vslidedown.vi v11, v10, 1
+; CHECK-NEXT:    vslidedown.vi v12, v10, 2
+; CHECK-NEXT:    vslidedown.vi v10, v10, 3
+; CHECK-NEXT:    vmv.x.s a1, v11
+; CHECK-NEXT:    vsetvli a2, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmor.mm v11, v11, v12
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v10, v11
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i1> @llvm.experimental.vector.match(<vscale x 16 x i8> %op1, <4 x i8> %op2, <vscale x 16 x i1> %mask)
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 16 x i1> @match_nxv16i8_v8i8(<vscale x 16 x i8> %op1, <8 x i8> %op2, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: match_nxv16i8_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vslidedown.vi v11, v10, 1
+; CHECK-NEXT:    vslidedown.vi v12, v10, 2
+; CHECK-NEXT:    vmv.x.s a1, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 3
+; CHECK-NEXT:    vmv.x.s a2, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 4
+; CHECK-NEXT:    vmv.x.s a3, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 5
+; CHECK-NEXT:    vmv.x.s a4, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 6
+; CHECK-NEXT:    vslidedown.vi v10, v10, 7
+; CHECK-NEXT:    vmv.x.s a5, v11
+; CHECK-NEXT:    vsetvli a6, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a2
+; CHECK-NEXT:    vmor.mm v11, v11, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a3
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vmseq.vx v11, v8, a4
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a5
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v10, v11
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i1> @llvm.experimental.vector.match(<vscale x 16 x i8> %op1, <8 x i8> %op2, <vscale x 16 x i1> %mask)
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 16 x i1> @match_nxv16i8_v16i8(<vscale x 16 x i8> %op1, <16 x i8> %op2, <vscale x 16 x i1> %mask) {
+; CHECK-LABEL: match_nxv16i8_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vslidedown.vi v11, v10, 1
+; CHECK-NEXT:    vslidedown.vi v12, v10, 2
+; CHECK-NEXT:    vmv.x.s a1, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 3
+; CHECK-NEXT:    vmv.x.s a2, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 4
+; CHECK-NEXT:    vmv.x.s a3, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 5
+; CHECK-NEXT:    vmv.x.s a4, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 6
+; CHECK-NEXT:    vmv.x.s a5, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 7
+; CHECK-NEXT:    vmv.x.s a6, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 8
+; CHECK-NEXT:    vmv.x.s a7, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 9
+; CHECK-NEXT:    vmv.x.s t0, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 10
+; CHECK-NEXT:    vmv.x.s t1, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 11
+; CHECK-NEXT:    vmv.x.s t2, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 12
+; CHECK-NEXT:    vmv.x.s t3, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 13
+; CHECK-NEXT:    vmv.x.s t4, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 14
+; CHECK-NEXT:    vslidedown.vi v10, v10, 15
+; CHECK-NEXT:    vmv.x.s t5, v11
+; CHECK-NEXT:    vsetvli t6, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a2
+; CHECK-NEXT:    vmor.mm v11, v11, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a3
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vmseq.vx v11, v8, a4
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a5
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a6
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a7
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, t0
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, t1
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, t2
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, t3
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, t4
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, t5
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v10, v11
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 16 x i1> @llvm.experimental.vector.match(<vscale x 16 x i8> %op1, <16 x i8> %op2, <vscale x 16 x i1> %mask)
+  ret <vscale x 16 x i1> %r
+}
+
+define <16 x i1> @match_v16i8_v1i8(<16 x i8> %op1, <1 x i8> %op2, <16 x i1> %mask) {
+; CHECK-LABEL: match_v16i8_v1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    vmseq.vx v8, v8, a0
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i1> @llvm.experimental.vector.match(<16 x i8> %op1, <1 x i8> %op2, <16 x i1> %mask)
+  ret <16 x i1> %r
+}
+
+define <16 x i1> @match_v16i8_v2i8(<16 x i8> %op1, <2 x i8> %op2, <16 x i1> %mask) {
+; CHECK-LABEL: match_v16i8_v2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    vslidedown.vi v9, v9, 1
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    vmseq.vx v8, v8, a0
+; CHECK-NEXT:    vmor.mm v8, v10, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i1> @llvm.experimental.vector.match(<16 x i8> %op1, <2 x i8> %op2, <16 x i1> %mask)
+  ret <16 x i1> %r
+}
+
+define <16 x i1> @match_v16i8_v4i8(<16 x i8> %op1, <4 x i8> %op2, <16 x i1> %mask) {
+; CHECK-LABEL: match_v16i8_v4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    vslidedown.vi v10, v9, 1
+; CHECK-NEXT:    vslidedown.vi v11, v9, 2
+; CHECK-NEXT:    vslidedown.vi v9, v9, 3
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v9
+; CHECK-NEXT:    vmseq.vx v9, v8, a0
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmor.mm v9, v10, v9
+; CHECK-NEXT:    vmseq.vx v8, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v9, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i1> @llvm.experimental.vector.match(<16 x i8> %op1, <4 x i8> %op2, <16 x i1> %mask)
+  ret <16 x i1> %r
+}
+
+define <16 x i1> @match_v16i8_v8i8(<16 x i8> %op1, <8 x i8> %op2, <16 x i1> %mask) {
+; CHECK-LABEL: match_v16i8_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    vslidedown.vi v10, v9, 1
+; CHECK-NEXT:    vslidedown.vi v11, v9, 2
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 3
+; CHECK-NEXT:    vmv.x.s a2, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 4
+; CHECK-NEXT:    vmv.x.s a3, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 5
+; CHECK-NEXT:    vmv.x.s a4, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 6
+; CHECK-NEXT:    vslidedown.vi v9, v9, 7
+; CHECK-NEXT:    vmv.x.s a5, v10
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v9
+; CHECK-NEXT:    vmseq.vx v9, v8, a2
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a3
+; CHECK-NEXT:    vmor.mm v9, v10, v9
+; CHECK-NEXT:    vmseq.vx v10, v8, a4
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a5
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v8, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v9, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i1> @llvm.experimental.vector.match(<16 x i8> %op1, <8 x i8> %op2, <16 x i1> %mask)
+  ret <16 x i1> %r
+}
+
+define <16 x i1> @match_v16i8_v16i8(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask) {
+; CHECK-LABEL: match_v16i8_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vrgather.vi v10, v9, 1
+; CHECK-NEXT:    vrgather.vi v11, v9, 0
+; CHECK-NEXT:    vmseq.vv v10, v8, v10
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vrgather.vi v11, v9, 2
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 3
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 4
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 5
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 6
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 7
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 8
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 9
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 10
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 11
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 12
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 13
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 14
+; CHECK-NEXT:    vrgather.vi v12, v9, 15
+; CHECK-NEXT:    vmseq.vv v9, v8, v11
+; CHECK-NEXT:    vmor.mm v9, v10, v9
+; CHECK-NEXT:    vmseq.vv v8, v8, v12
+; CHECK-NEXT:    vmor.mm v8, v9, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <16 x i1> @llvm.experimental.vector.match(<16 x i8> %op1, <16 x i8> %op2, <16 x i1> %mask)
+  ret <16 x i1> %r
+}
+
+define <8 x i1> @match_v8i8_v8i8(<8 x i8> %op1, <8 x i8> %op2, <8 x i1> %mask) {
+; CHECK-LABEL: match_v8i8_v8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vrgather.vi v10, v9, 1
+; CHECK-NEXT:    vrgather.vi v11, v9, 0
+; CHECK-NEXT:    vmseq.vv v10, v8, v10
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vrgather.vi v11, v9, 2
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 3
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 4
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 5
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 6
+; CHECK-NEXT:    vrgather.vi v12, v9, 7
+; CHECK-NEXT:    vmseq.vv v9, v8, v11
+; CHECK-NEXT:    vmor.mm v9, v10, v9
+; CHECK-NEXT:    vmseq.vv v8, v8, v12
+; CHECK-NEXT:    vmor.mm v8, v9, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i1> @llvm.experimental.vector.match(<8 x i8> %op1, <8 x i8> %op2, <8 x i1> %mask)
+  ret <8 x i1> %r
+}
+
+define <vscale x 8 x i1> @match_nxv8i16_v8i16(<vscale x 8 x i16> %op1, <8 x i16> %op2, <vscale x 8 x i1> %mask) {
+; CHECK-LABEL: match_nxv8i16_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vslidedown.vi v11, v10, 1
+; CHECK-NEXT:    vslidedown.vi v12, v10, 2
+; CHECK-NEXT:    vmv.x.s a1, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 3
+; CHECK-NEXT:    vmv.x.s a2, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 4
+; CHECK-NEXT:    vmv.x.s a3, v11
+; CHECK-NEXT:    vslidedown.vi v11, v10, 5
+; CHECK-NEXT:    vmv.x.s a4, v12
+; CHECK-NEXT:    vslidedown.vi v12, v10, 6
+; CHECK-NEXT:    vslidedown.vi v10, v10, 7
+; CHECK-NEXT:    vmv.x.s a5, v11
+; CHECK-NEXT:    vsetvli a6, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a2
+; CHECK-NEXT:    vmor.mm v11, v11, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a3
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vmseq.vx v11, v8, a4
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a5
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmor.mm v10, v10, v12
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v10, v11
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 8 x i1> @llvm.experimental.vector.match(<vscale x 8 x i16> %op1, <8 x i16> %op2, <vscale x 8 x i1> %mask)
+  ret <vscale x 8 x i1> %r
+}
+
+define <8 x i1> @match_v8i16(<8 x i16> %op1, <8 x i16> %op2, <8 x i1> %mask) {
+; CHECK-LABEL: match_v8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vrgather.vi v10, v9, 1
+; CHECK-NEXT:    vrgather.vi v11, v9, 0
+; CHECK-NEXT:    vmseq.vv v10, v8, v10
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vrgather.vi v11, v9, 2
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 3
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 4
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 5
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vrgather.vi v11, v9, 6
+; CHECK-NEXT:    vrgather.vi v12, v9, 7
+; CHECK-NEXT:    vmseq.vv v9, v8, v11
+; CHECK-NEXT:    vmor.mm v9, v10, v9
+; CHECK-NEXT:    vmseq.vv v8, v8, v12
+; CHECK-NEXT:    vmor.mm v8, v9, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i1> @llvm.experimental.vector.match(<8 x i16> %op1, <8 x i16> %op2, <8 x i1> %mask)
+  ret <8 x i1> %r
+}
+
+; Cases where op2 has more elements than op1.
+
+define <8 x i1> @match_v8i8_v16i8(<8 x i8> %op1, <16 x i8> %op2, <8 x i1> %mask) {
+; CHECK-LABEL: match_v8i8_v16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    vslidedown.vi v10, v9, 1
+; CHECK-NEXT:    vslidedown.vi v11, v9, 2
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 3
+; CHECK-NEXT:    vmv.x.s a2, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 4
+; CHECK-NEXT:    vmv.x.s a3, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 5
+; CHECK-NEXT:    vmv.x.s a4, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 6
+; CHECK-NEXT:    vmv.x.s a5, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 7
+; CHECK-NEXT:    vmv.x.s a6, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 8
+; CHECK-NEXT:    vmv.x.s a7, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 9
+; CHECK-NEXT:    vmv.x.s t0, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 10
+; CHECK-NEXT:    vmv.x.s t1, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 11
+; CHECK-NEXT:    vmv.x.s t2, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 12
+; CHECK-NEXT:    vmv.x.s t3, v10
+; CHECK-NEXT:    vslidedown.vi v10, v9, 13
+; CHECK-NEXT:    vmv.x.s t4, v11
+; CHECK-NEXT:    vslidedown.vi v11, v9, 14
+; CHECK-NEXT:    vslidedown.vi v9, v9, 15
+; CHECK-NEXT:    vmv.x.s t5, v10
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v9
+; CHECK-NEXT:    vmseq.vx v9, v8, a2
+; CHECK-NEXT:    vmor.mm v10, v10, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a3
+; CHECK-NEXT:    vmor.mm v9, v10, v9
+; CHECK-NEXT:    vmseq.vx v10, v8, a4
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a5
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a6
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, a7
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, t0
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, t1
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, t2
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, t3
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, t4
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmseq.vx v11, v8, t5
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmor.mm v9, v9, v11
+; CHECK-NEXT:    vmor.mm v9, v9, v10
+; CHECK-NEXT:    vmseq.vx v8, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v9, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <8 x i1> @llvm.experimental.vector.match(<8 x i8> %op1, <16 x i8> %op2, <8 x i1> %mask)
+  ret <8 x i1> %r
+}
+
+define <vscale x 16 x i1> @match_nxv16i8_v32i8(<vscale x 16 x i8> %op1, <32 x i8> %op2, <vscale x 16 x i1> %mask) {
+; RV32-LABEL: match_nxv16i8_v32i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    .cfi_def_cfa_offset 64
+; RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    .cfi_offset s1, -12
+; RV32-NEXT:    .cfi_offset s2, -16
+; RV32-NEXT:    .cfi_offset s3, -20
+; RV32-NEXT:    .cfi_offset s4, -24
+; RV32-NEXT:    .cfi_offset s5, -28
+; RV32-NEXT:    .cfi_offset s6, -32
+; RV32-NEXT:    .cfi_offset s7, -36
+; RV32-NEXT:    .cfi_offset s8, -40
+; RV32-NEXT:    .cfi_offset s9, -44
+; RV32-NEXT:    .cfi_offset s10, -48
+; RV32-NEXT:    .cfi_offset s11, -52
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    sw a0, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT:    vslidedown.vi v12, v10, 1
+; RV32-NEXT:    vslidedown.vi v13, v10, 2
+; RV32-NEXT:    vslidedown.vi v14, v10, 3
+; RV32-NEXT:    vslidedown.vi v15, v10, 4
+; RV32-NEXT:    vslidedown.vi v16, v10, 5
+; RV32-NEXT:    vslidedown.vi v17, v10, 6
+; RV32-NEXT:    vslidedown.vi v18, v10, 7
+; RV32-NEXT:    vslidedown.vi v19, v10, 8
+; RV32-NEXT:    vslidedown.vi v20, v10, 9
+; RV32-NEXT:    vslidedown.vi v21, v10, 10
+; RV32-NEXT:    vslidedown.vi v22, v10, 11
+; RV32-NEXT:    vslidedown.vi v23, v10, 12
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v24, v10, 16
+; RV32-NEXT:    vmv.x.s a1, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 17
+; RV32-NEXT:    vmv.x.s a2, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 18
+; RV32-NEXT:    vmv.x.s a3, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 19
+; RV32-NEXT:    vmv.x.s a4, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 20
+; RV32-NEXT:    vmv.x.s a5, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 21
+; RV32-NEXT:    vmv.x.s a6, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 22
+; RV32-NEXT:    vmv.x.s a7, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 23
+; RV32-NEXT:    vmv.x.s t0, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 24
+; RV32-NEXT:    vmv.x.s t1, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 25
+; RV32-NEXT:    vmv.x.s t2, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 26
+; RV32-NEXT:    vmv.x.s t3, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 27
+; RV32-NEXT:    vmv.x.s t4, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 28
+; RV32-NEXT:    vmv.x.s t5, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 29
+; RV32-NEXT:    vmv.x.s t6, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 30
+; RV32-NEXT:    vmv.x.s s0, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 31
+; RV32-NEXT:    vmv.x.s s1, v24
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v11, v10, 13
+; RV32-NEXT:    vslidedown.vi v24, v10, 14
+; RV32-NEXT:    vslidedown.vi v10, v10, 15
+; RV32-NEXT:    vmv.x.s s2, v12
+; RV32-NEXT:    vmv.x.s s3, v13
+; RV32-NEXT:    vmv.x.s s4, v14
+; RV32-NEXT:    vmv.x.s s5, v15
+; RV32-NEXT:    vmv.x.s s6, v16
+; RV32-NEXT:    vmv.x.s s7, v17
+; RV32-NEXT:    vmv.x.s s8, v18
+; RV32-NEXT:    vmv.x.s s9, v19
+; RV32-NEXT:    vmv.x.s s10, v20
+; RV32-NEXT:    vmv.x.s s11, v21
+; RV32-NEXT:    vmv.x.s ra, v22
+; RV32-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV32-NEXT:    lw a0, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT:    vmseq.vx v12, v8, a0
+; RV32-NEXT:    vmv.x.s a0, v23
+; RV32-NEXT:    vmseq.vx v13, v8, s2
+; RV32-NEXT:    vmv.x.s s2, v11
+; RV32-NEXT:    vmseq.vx v11, v8, s3
+; RV32-NEXT:    vmv.x.s s3, v24
+; RV32-NEXT:    vmseq.vx v14, v8, s4
+; RV32-NEXT:    vmv.x.s s4, v10
+; RV32-NEXT:    vmseq.vx v10, v8, s5
+; RV32-NEXT:    vmor.mm v12, v12, v13
+; RV32-NEXT:    vmseq.vx v13, v8, s6
+; RV32-NEXT:    vmor.mm v11, v12, v11
+; RV32-NEXT:    vmseq.vx v12, v8, s7
+; RV32-NEXT:    vmor.mm v11, v11, v14
+; RV32-NEXT:    vmseq.vx v14, v8, s8
+; RV32-NEXT:    vmor.mm v10, v11, v10
+; RV32-NEXT:    vmseq.vx v11, v8, s9
+; RV32-NEXT:    vmor.mm v10, v10, v13
+; RV32-NEXT:    vmseq.vx v13, v8, s10
+; RV32-NEXT:    vmor.mm v10, v10, v12
+; RV32-NEXT:    vmseq.vx v12, v8, s11
+; RV32-NEXT:    vmor.mm v10, v10, v14
+; RV32-NEXT:    vmseq.vx v14, v8, ra
+; RV32-NEXT:    vmor.mm v10, v10, v11
+; RV32-NEXT:    vmseq.vx v11, v8, a0
+; RV32-NEXT:    vmor.mm v10, v10, v13
+; RV32-NEXT:    vmseq.vx v13, v8, s2
+; RV32-NEXT:    vmor.mm v10, v10, v12
+; RV32-NEXT:    vmseq.vx v12, v8, s3
+; RV32-NEXT:    vmor.mm v10, v10, v14
+; RV32-NEXT:    vmseq.vx v14, v8, s4
+; RV32-NEXT:    vmor.mm v10, v10, v11
+; RV32-NEXT:    vmseq.vx v11, v8, a1
+; RV32-NEXT:    vmor.mm v10, v10, v13
+; RV32-NEXT:    vmseq.vx v13, v8, a2
+; RV32-NEXT:    vmor.mm v10, v10, v12
+; RV32-NEXT:    vmseq.vx v12, v8, a3
+; RV32-NEXT:    vmor.mm v10, v10, v14
+; RV32-NEXT:    vmseq.vx v14, v8, a4
+; RV32-NEXT:    vmor.mm v10, v10, v11
+; RV32-NEXT:    vmseq.vx v11, v8, a5
+; RV32-NEXT:    vmor.mm v10, v10, v13
+; RV32-NEXT:    vmseq.vx v13, v8, a6
+; RV32-NEXT:    vmor.mm v10, v10, v12
+; RV32-NEXT:    vmseq.vx v12, v8, a7
+; RV32-NEXT:    vmor.mm v10, v10, v14
+; RV32-NEXT:    vmseq.vx v14, v8, t0
+; RV32-NEXT:    vmor.mm v10, v10, v11
+; RV32-NEXT:    vmseq.vx v11, v8, t1
+; RV32-NEXT:    vmor.mm v10, v10, v13
+; RV32-NEXT:    vmseq.vx v13, v8, t2
+; RV32-NEXT:    vmor.mm v10, v10, v12
+; RV32-NEXT:    vmseq.vx v12, v8, t3
+; RV32-NEXT:    vmor.mm v10, v10, v14
+; RV32-NEXT:    vmseq.vx v14, v8, t4
+; RV32-NEXT:    vmor.mm v10, v10, v11
+; RV32-NEXT:    vmseq.vx v11, v8, t5
+; RV32-NEXT:    vmor.mm v10, v10, v13
+; RV32-NEXT:    vmseq.vx v13, v8, t6
+; RV32-NEXT:    vmor.mm v10, v10, v12
+; RV32-NEXT:    vmseq.vx v12, v8, s0
+; RV32-NEXT:    vmor.mm v10, v10, v14
+; RV32-NEXT:    vmor.mm v10, v10, v11
+; RV32-NEXT:    vmor.mm v10, v10, v13
+; RV32-NEXT:    vmor.mm v10, v10, v12
+; RV32-NEXT:    vmseq.vx v11, v8, s1
+; RV32-NEXT:    vmor.mm v8, v10, v11
+; RV32-NEXT:    vmand.mm v0, v8, v0
+; RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
+; RV32-NEXT:    .cfi_restore s2
+; RV32-NEXT:    .cfi_restore s3
+; RV32-NEXT:    .cfi_restore s4
+; RV32-NEXT:    .cfi_restore s5
+; RV32-NEXT:    .cfi_restore s6
+; RV32-NEXT:    .cfi_restore s7
+; RV32-NEXT:    .cfi_restore s8
+; RV32-NEXT:    .cfi_restore s9
+; RV32-NEXT:    .cfi_restore s10
+; RV32-NEXT:    .cfi_restore s11
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: match_nxv16i8_v32i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -112
+; RV64-NEXT:    .cfi_def_cfa_offset 112
+; RV64-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    .cfi_offset s1, -24
+; RV64-NEXT:    .cfi_offset s2, -32
+; RV64-NEXT:    .cfi_offset s3, -40
+; RV64-NEXT:    .cfi_offset s4, -48
+; RV64-NEXT:    .cfi_offset s5, -56
+; RV64-NEXT:    .cfi_offset s6, -64
+; RV64-NEXT:    .cfi_offset s7, -72
+; RV64-NEXT:    .cfi_offset s8, -80
+; RV64-NEXT:    .cfi_offset s9, -88
+; RV64-NEXT:    .cfi_offset s10, -96
+; RV64-NEXT:    .cfi_offset s11, -104
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    sd a0, 0(sp) # 8-byte Folded Spill
+; RV64-NEXT:    vslidedown.vi v12, v10, 1
+; RV64-NEXT:    vslidedown.vi v13, v10, 2
+; RV64-NEXT:    vslidedown.vi v14, v10, 3
+; RV64-NEXT:    vslidedown.vi v15, v10, 4
+; RV64-NEXT:    vslidedown.vi v16, v10, 5
+; RV64-NEXT:    vslidedown.vi v17, v10, 6
+; RV64-NEXT:    vslidedown.vi v18, v10, 7
+; RV64-NEXT:    vslidedown.vi v19, v10, 8
+; RV64-NEXT:    vslidedown.vi v20, v10, 9
+; RV64-NEXT:    vslidedown.vi v21, v10, 10
+; RV64-NEXT:    vslidedown.vi v22, v10, 11
+; RV64-NEXT:    vslidedown.vi v23, v10, 12
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v24, v10, 16
+; RV64-NEXT:    vmv.x.s a1, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 17
+; RV64-NEXT:    vmv.x.s a2, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 18
+; RV64-NEXT:    vmv.x.s a3, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 19
+; RV64-NEXT:    vmv.x.s a4, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 20
+; RV64-NEXT:    vmv.x.s a5, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 21
+; RV64-NEXT:    vmv.x.s a6, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 22
+; RV64-NEXT:    vmv.x.s a7, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 23
+; RV64-NEXT:    vmv.x.s t0, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 24
+; RV64-NEXT:    vmv.x.s t1, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 25
+; RV64-NEXT:    vmv.x.s t2, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 26
+; RV64-NEXT:    vmv.x.s t3, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 27
+; RV64-NEXT:    vmv.x.s t4, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 28
+; RV64-NEXT:    vmv.x.s t5, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 29
+; RV64-NEXT:    vmv.x.s t6, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 30
+; RV64-NEXT:    vmv.x.s s0, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 31
+; RV64-NEXT:    vmv.x.s s1, v24
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v11, v10, 13
+; RV64-NEXT:    vslidedown.vi v24, v10, 14
+; RV64-NEXT:    vslidedown.vi v10, v10, 15
+; RV64-NEXT:    vmv.x.s s2, v12
+; RV64-NEXT:    vmv.x.s s3, v13
+; RV64-NEXT:    vmv.x.s s4, v14
+; RV64-NEXT:    vmv.x.s s5, v15
+; RV64-NEXT:    vmv.x.s s6, v16
+; RV64-NEXT:    vmv.x.s s7, v17
+; RV64-NEXT:    vmv.x.s s8, v18
+; RV64-NEXT:    vmv.x.s s9, v19
+; RV64-NEXT:    vmv.x.s s10, v20
+; RV64-NEXT:    vmv.x.s s11, v21
+; RV64-NEXT:    vmv.x.s ra, v22
+; RV64-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
+; RV64-NEXT:    ld a0, 0(sp) # 8-byte Folded Reload
+; RV64-NEXT:    vmseq.vx v12, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v23
+; RV64-NEXT:    vmseq.vx v13, v8, s2
+; RV64-NEXT:    vmv.x.s s2, v11
+; RV64-NEXT:    vmseq.vx v11, v8, s3
+; RV64-NEXT:    vmv.x.s s3, v24
+; RV64-NEXT:    vmseq.vx v14, v8, s4
+; RV64-NEXT:    vmv.x.s s4, v10
+; RV64-NEXT:    vmseq.vx v10, v8, s5
+; RV64-NEXT:    vmor.mm v12, v12, v13
+; RV64-NEXT:    vmseq.vx v13, v8, s6
+; RV64-NEXT:    vmor.mm v11, v12, v11
+; RV64-NEXT:    vmseq.vx v12, v8, s7
+; RV64-NEXT:    vmor.mm v11, v11, v14
+; RV64-NEXT:    vmseq.vx v14, v8, s8
+; RV64-NEXT:    vmor.mm v10, v11, v10
+; RV64-NEXT:    vmseq.vx v11, v8, s9
+; RV64-NEXT:    vmor.mm v10, v10, v13
+; RV64-NEXT:    vmseq.vx v13, v8, s10
+; RV64-NEXT:    vmor.mm v10, v10, v12
+; RV64-NEXT:    vmseq.vx v12, v8, s11
+; RV64-NEXT:    vmor.mm v10, v10, v14
+; RV64-NEXT:    vmseq.vx v14, v8, ra
+; RV64-NEXT:    vmor.mm v10, v10, v11
+; RV64-NEXT:    vmseq.vx v11, v8, a0
+; RV64-NEXT:    vmor.mm v10, v10, v13
+; RV64-NEXT:    vmseq.vx v13, v8, s2
+; RV64-NEXT:    vmor.mm v10, v10, v12
+; RV64-NEXT:    vmseq.vx v12, v8, s3
+; RV64-NEXT:    vmor.mm v10, v10, v14
+; RV64-NEXT:    vmseq.vx v14, v8, s4
+; RV64-NEXT:    vmor.mm v10, v10, v11
+; RV64-NEXT:    vmseq.vx v11, v8, a1
+; RV64-NEXT:    vmor.mm v10, v10, v13
+; RV64-NEXT:    vmseq.vx v13, v8, a2
+; RV64-NEXT:    vmor.mm v10, v10, v12
+; RV64-NEXT:    vmseq.vx v12, v8, a3
+; RV64-NEXT:    vmor.mm v10, v10, v14
+; RV64-NEXT:    vmseq.vx v14, v8, a4
+; RV64-NEXT:    vmor.mm v10, v10, v11
+; RV64-NEXT:    vmseq.vx v11, v8, a5
+; RV64-NEXT:    vmor.mm v10, v10, v13
+; RV64-NEXT:    vmseq.vx v13, v8, a6
+; RV64-NEXT:    vmor.mm v10, v10, v12
+; RV64-NEXT:    vmseq.vx v12, v8, a7
+; RV64-NEXT:    vmor.mm v10, v10, v14
+; RV64-NEXT:    vmseq.vx v14, v8, t0
+; RV64-NEXT:    vmor.mm v10, v10, v11
+; RV64-NEXT:    vmseq.vx v11, v8, t1
+; RV64-NEXT:    vmor.mm v10, v10, v13
+; RV64-NEXT:    vmseq.vx v13, v8, t2
+; RV64-NEXT:    vmor.mm v10, v10, v12
+; RV64-NEXT:    vmseq.vx v12, v8, t3
+; RV64-NEXT:    vmor.mm v10, v10, v14
+; RV64-NEXT:    vmseq.vx v14, v8, t4
+; RV64-NEXT:    vmor.mm v10, v10, v11
+; RV64-NEXT:    vmseq.vx v11, v8, t5
+; RV64-NEXT:    vmor.mm v10, v10, v13
+; RV64-NEXT:    vmseq.vx v13, v8, t6
+; RV64-NEXT:    vmor.mm v10, v10, v12
+; RV64-NEXT:    vmseq.vx v12, v8, s0
+; RV64-NEXT:    vmor.mm v10, v10, v14
+; RV64-NEXT:    vmor.mm v10, v10, v11
+; RV64-NEXT:    vmor.mm v10, v10, v13
+; RV64-NEXT:    vmor.mm v10, v10, v12
+; RV64-NEXT:    vmseq.vx v11, v8, s1
+; RV64-NEXT:    vmor.mm v8, v10, v11
+; RV64-NEXT:    vmand.mm v0, v8, v0
+; RV64-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s1
+; RV64-NEXT:    .cfi_restore s2
+; RV64-NEXT:    .cfi_restore s3
+; RV64-NEXT:    .cfi_restore s4
+; RV64-NEXT:    .cfi_restore s5
+; RV64-NEXT:    .cfi_restore s6
+; RV64-NEXT:    .cfi_restore s7
+; RV64-NEXT:    .cfi_restore s8
+; RV64-NEXT:    .cfi_restore s9
+; RV64-NEXT:    .cfi_restore s10
+; RV64-NEXT:    .cfi_restore s11
+; RV64-NEXT:    addi sp, sp, 112
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %r = tail call <vscale x 16 x i1> @llvm.experimental.vector.match(<vscale x 16 x i8> %op1, <32 x i8> %op2, <vscale x 16 x i1> %mask)
+  ret <vscale x 16 x i1> %r
+}
+
+define <16 x i1> @match_v16i8_v32i8(<16 x i8> %op1, <32 x i8> %op2, <16 x i1> %mask) {
+; RV32-LABEL: match_v16i8_v32i8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -64
+; RV32-NEXT:    .cfi_def_cfa_offset 64
+; RV32-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s4, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s5, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s6, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s7, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s8, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s9, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s10, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT:    sw s11, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT:    .cfi_offset ra, -4
+; RV32-NEXT:    .cfi_offset s0, -8
+; RV32-NEXT:    .cfi_offset s1, -12
+; RV32-NEXT:    .cfi_offset s2, -16
+; RV32-NEXT:    .cfi_offset s3, -20
+; RV32-NEXT:    .cfi_offset s4, -24
+; RV32-NEXT:    .cfi_offset s5, -28
+; RV32-NEXT:    .cfi_offset s6, -32
+; RV32-NEXT:    .cfi_offset s7, -36
+; RV32-NEXT:    .cfi_offset s8, -40
+; RV32-NEXT:    .cfi_offset s9, -44
+; RV32-NEXT:    .cfi_offset s10, -48
+; RV32-NEXT:    .cfi_offset s11, -52
+; RV32-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    vslidedown.vi v9, v10, 1
+; RV32-NEXT:    vslidedown.vi v12, v10, 2
+; RV32-NEXT:    vslidedown.vi v13, v10, 3
+; RV32-NEXT:    vslidedown.vi v14, v10, 4
+; RV32-NEXT:    vslidedown.vi v15, v10, 5
+; RV32-NEXT:    vslidedown.vi v16, v10, 6
+; RV32-NEXT:    vslidedown.vi v17, v10, 7
+; RV32-NEXT:    vslidedown.vi v18, v10, 8
+; RV32-NEXT:    vslidedown.vi v19, v10, 9
+; RV32-NEXT:    vslidedown.vi v20, v10, 10
+; RV32-NEXT:    vslidedown.vi v21, v10, 11
+; RV32-NEXT:    vslidedown.vi v22, v10, 12
+; RV32-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV32-NEXT:    vslidedown.vi v24, v10, 16
+; RV32-NEXT:    vmv.x.s a1, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 17
+; RV32-NEXT:    vmv.x.s a2, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 18
+; RV32-NEXT:    vmv.x.s a3, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 19
+; RV32-NEXT:    vmv.x.s a4, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 20
+; RV32-NEXT:    vmv.x.s a5, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 21
+; RV32-NEXT:    vmv.x.s a6, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 22
+; RV32-NEXT:    vmv.x.s a7, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 23
+; RV32-NEXT:    vmv.x.s t0, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 24
+; RV32-NEXT:    vmv.x.s t1, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 25
+; RV32-NEXT:    vmv.x.s t2, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 26
+; RV32-NEXT:    vmv.x.s t3, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 27
+; RV32-NEXT:    vmv.x.s t4, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 28
+; RV32-NEXT:    vmv.x.s t5, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 29
+; RV32-NEXT:    vmv.x.s t6, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 30
+; RV32-NEXT:    vmv.x.s s0, v24
+; RV32-NEXT:    vslidedown.vi v24, v10, 31
+; RV32-NEXT:    vmv.x.s s1, v24
+; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV32-NEXT:    vslidedown.vi v11, v10, 13
+; RV32-NEXT:    vslidedown.vi v23, v10, 14
+; RV32-NEXT:    vslidedown.vi v10, v10, 15
+; RV32-NEXT:    vmv.x.s s2, v9
+; RV32-NEXT:    vmv.x.s s3, v12
+; RV32-NEXT:    vmv.x.s s4, v13
+; RV32-NEXT:    vmv.x.s s5, v14
+; RV32-NEXT:    vmv.x.s s6, v15
+; RV32-NEXT:    vmv.x.s s7, v16
+; RV32-NEXT:    vmv.x.s s8, v17
+; RV32-NEXT:    vmv.x.s s9, v18
+; RV32-NEXT:    vmv.x.s s10, v19
+; RV32-NEXT:    vmv.x.s s11, v20
+; RV32-NEXT:    vmv.x.s ra, v21
+; RV32-NEXT:    vmseq.vx v9, v8, a0
+; RV32-NEXT:    vmv.x.s a0, v22
+; RV32-NEXT:    vmseq.vx v12, v8, s2
+; RV32-NEXT:    vmv.x.s s2, v11
+; RV32-NEXT:    vmseq.vx v11, v8, s3
+; RV32-NEXT:    vmv.x.s s3, v23
+; RV32-NEXT:    vmseq.vx v13, v8, s4
+; RV32-NEXT:    vmv.x.s s4, v10
+; RV32-NEXT:    vmseq.vx v10, v8, s5
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmseq.vx v12, v8, s6
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v11, v8, s7
+; RV32-NEXT:    vmor.mm v9, v9, v13
+; RV32-NEXT:    vmseq.vx v13, v8, s8
+; RV32-NEXT:    vmor.mm v9, v9, v10
+; RV32-NEXT:    vmseq.vx v10, v8, s9
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmseq.vx v12, v8, s10
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v11, v8, s11
+; RV32-NEXT:    vmor.mm v9, v9, v13
+; RV32-NEXT:    vmseq.vx v13, v8, ra
+; RV32-NEXT:    vmor.mm v9, v9, v10
+; RV32-NEXT:    vmseq.vx v10, v8, a0
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmseq.vx v12, v8, s2
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v11, v8, s3
+; RV32-NEXT:    vmor.mm v9, v9, v13
+; RV32-NEXT:    vmseq.vx v13, v8, s4
+; RV32-NEXT:    vmor.mm v9, v9, v10
+; RV32-NEXT:    vmseq.vx v10, v8, a1
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmseq.vx v12, v8, a2
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v11, v8, a3
+; RV32-NEXT:    vmor.mm v9, v9, v13
+; RV32-NEXT:    vmseq.vx v13, v8, a4
+; RV32-NEXT:    vmor.mm v9, v9, v10
+; RV32-NEXT:    vmseq.vx v10, v8, a5
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmseq.vx v12, v8, a6
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v11, v8, a7
+; RV32-NEXT:    vmor.mm v9, v9, v13
+; RV32-NEXT:    vmseq.vx v13, v8, t0
+; RV32-NEXT:    vmor.mm v9, v9, v10
+; RV32-NEXT:    vmseq.vx v10, v8, t1
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmseq.vx v12, v8, t2
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v11, v8, t3
+; RV32-NEXT:    vmor.mm v9, v9, v13
+; RV32-NEXT:    vmseq.vx v13, v8, t4
+; RV32-NEXT:    vmor.mm v9, v9, v10
+; RV32-NEXT:    vmseq.vx v10, v8, t5
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmseq.vx v12, v8, t6
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v11, v8, s0
+; RV32-NEXT:    vmor.mm v9, v9, v13
+; RV32-NEXT:    vmor.mm v9, v9, v10
+; RV32-NEXT:    vmor.mm v9, v9, v12
+; RV32-NEXT:    vmor.mm v9, v9, v11
+; RV32-NEXT:    vmseq.vx v8, v8, s1
+; RV32-NEXT:    vmor.mm v8, v9, v8
+; RV32-NEXT:    vmand.mm v0, v8, v0
+; RV32-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s4, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s5, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s6, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s7, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s8, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s9, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s10, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT:    lw s11, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT:    .cfi_restore ra
+; RV32-NEXT:    .cfi_restore s0
+; RV32-NEXT:    .cfi_restore s1
+; RV32-NEXT:    .cfi_restore s2
+; RV32-NEXT:    .cfi_restore s3
+; RV32-NEXT:    .cfi_restore s4
+; RV32-NEXT:    .cfi_restore s5
+; RV32-NEXT:    .cfi_restore s6
+; RV32-NEXT:    .cfi_restore s7
+; RV32-NEXT:    .cfi_restore s8
+; RV32-NEXT:    .cfi_restore s9
+; RV32-NEXT:    .cfi_restore s10
+; RV32-NEXT:    .cfi_restore s11
+; RV32-NEXT:    addi sp, sp, 64
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: match_v16i8_v32i8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -112
+; RV64-NEXT:    .cfi_def_cfa_offset 112
+; RV64-NEXT:    sd ra, 104(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s0, 96(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s1, 88(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s2, 80(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s3, 72(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s4, 64(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s5, 56(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s6, 48(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s7, 40(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s8, 32(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s9, 24(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s10, 16(sp) # 8-byte Folded Spill
+; RV64-NEXT:    sd s11, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT:    .cfi_offset ra, -8
+; RV64-NEXT:    .cfi_offset s0, -16
+; RV64-NEXT:    .cfi_offset s1, -24
+; RV64-NEXT:    .cfi_offset s2, -32
+; RV64-NEXT:    .cfi_offset s3, -40
+; RV64-NEXT:    .cfi_offset s4, -48
+; RV64-NEXT:    .cfi_offset s5, -56
+; RV64-NEXT:    .cfi_offset s6, -64
+; RV64-NEXT:    .cfi_offset s7, -72
+; RV64-NEXT:    .cfi_offset s8, -80
+; RV64-NEXT:    .cfi_offset s9, -88
+; RV64-NEXT:    .cfi_offset s10, -96
+; RV64-NEXT:    .cfi_offset s11, -104
+; RV64-NEXT:    vsetivli zero, 1, e8, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    vslidedown.vi v9, v10, 1
+; RV64-NEXT:    vslidedown.vi v12, v10, 2
+; RV64-NEXT:    vslidedown.vi v13, v10, 3
+; RV64-NEXT:    vslidedown.vi v14, v10, 4
+; RV64-NEXT:    vslidedown.vi v15, v10, 5
+; RV64-NEXT:    vslidedown.vi v16, v10, 6
+; RV64-NEXT:    vslidedown.vi v17, v10, 7
+; RV64-NEXT:    vslidedown.vi v18, v10, 8
+; RV64-NEXT:    vslidedown.vi v19, v10, 9
+; RV64-NEXT:    vslidedown.vi v20, v10, 10
+; RV64-NEXT:    vslidedown.vi v21, v10, 11
+; RV64-NEXT:    vslidedown.vi v22, v10, 12
+; RV64-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
+; RV64-NEXT:    vslidedown.vi v24, v10, 16
+; RV64-NEXT:    vmv.x.s a1, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 17
+; RV64-NEXT:    vmv.x.s a2, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 18
+; RV64-NEXT:    vmv.x.s a3, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 19
+; RV64-NEXT:    vmv.x.s a4, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 20
+; RV64-NEXT:    vmv.x.s a5, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 21
+; RV64-NEXT:    vmv.x.s a6, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 22
+; RV64-NEXT:    vmv.x.s a7, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 23
+; RV64-NEXT:    vmv.x.s t0, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 24
+; RV64-NEXT:    vmv.x.s t1, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 25
+; RV64-NEXT:    vmv.x.s t2, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 26
+; RV64-NEXT:    vmv.x.s t3, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 27
+; RV64-NEXT:    vmv.x.s t4, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 28
+; RV64-NEXT:    vmv.x.s t5, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 29
+; RV64-NEXT:    vmv.x.s t6, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 30
+; RV64-NEXT:    vmv.x.s s0, v24
+; RV64-NEXT:    vslidedown.vi v24, v10, 31
+; RV64-NEXT:    vmv.x.s s1, v24
+; RV64-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT:    vslidedown.vi v11, v10, 13
+; RV64-NEXT:    vslidedown.vi v23, v10, 14
+; RV64-NEXT:    vslidedown.vi v10, v10, 15
+; RV64-NEXT:    vmv.x.s s2, v9
+; RV64-NEXT:    vmv.x.s s3, v12
+; RV64-NEXT:    vmv.x.s s4, v13
+; RV64-NEXT:    vmv.x.s s5, v14
+; RV64-NEXT:    vmv.x.s s6, v15
+; RV64-NEXT:    vmv.x.s s7, v16
+; RV64-NEXT:    vmv.x.s s8, v17
+; RV64-NEXT:    vmv.x.s s9, v18
+; RV64-NEXT:    vmv.x.s s10, v19
+; RV64-NEXT:    vmv.x.s s11, v20
+; RV64-NEXT:    vmv.x.s ra, v21
+; RV64-NEXT:    vmseq.vx v9, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v22
+; RV64-NEXT:    vmseq.vx v12, v8, s2
+; RV64-NEXT:    vmv.x.s s2, v11
+; RV64-NEXT:    vmseq.vx v11, v8, s3
+; RV64-NEXT:    vmv.x.s s3, v23
+; RV64-NEXT:    vmseq.vx v13, v8, s4
+; RV64-NEXT:    vmv.x.s s4, v10
+; RV64-NEXT:    vmseq.vx v10, v8, s5
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmseq.vx v12, v8, s6
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v11, v8, s7
+; RV64-NEXT:    vmor.mm v9, v9, v13
+; RV64-NEXT:    vmseq.vx v13, v8, s8
+; RV64-NEXT:    vmor.mm v9, v9, v10
+; RV64-NEXT:    vmseq.vx v10, v8, s9
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmseq.vx v12, v8, s10
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v11, v8, s11
+; RV64-NEXT:    vmor.mm v9, v9, v13
+; RV64-NEXT:    vmseq.vx v13, v8, ra
+; RV64-NEXT:    vmor.mm v9, v9, v10
+; RV64-NEXT:    vmseq.vx v10, v8, a0
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmseq.vx v12, v8, s2
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v11, v8, s3
+; RV64-NEXT:    vmor.mm v9, v9, v13
+; RV64-NEXT:    vmseq.vx v13, v8, s4
+; RV64-NEXT:    vmor.mm v9, v9, v10
+; RV64-NEXT:    vmseq.vx v10, v8, a1
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmseq.vx v12, v8, a2
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v11, v8, a3
+; RV64-NEXT:    vmor.mm v9, v9, v13
+; RV64-NEXT:    vmseq.vx v13, v8, a4
+; RV64-NEXT:    vmor.mm v9, v9, v10
+; RV64-NEXT:    vmseq.vx v10, v8, a5
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmseq.vx v12, v8, a6
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v11, v8, a7
+; RV64-NEXT:    vmor.mm v9, v9, v13
+; RV64-NEXT:    vmseq.vx v13, v8, t0
+; RV64-NEXT:    vmor.mm v9, v9, v10
+; RV64-NEXT:    vmseq.vx v10, v8, t1
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmseq.vx v12, v8, t2
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v11, v8, t3
+; RV64-NEXT:    vmor.mm v9, v9, v13
+; RV64-NEXT:    vmseq.vx v13, v8, t4
+; RV64-NEXT:    vmor.mm v9, v9, v10
+; RV64-NEXT:    vmseq.vx v10, v8, t5
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmseq.vx v12, v8, t6
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v11, v8, s0
+; RV64-NEXT:    vmor.mm v9, v9, v13
+; RV64-NEXT:    vmor.mm v9, v9, v10
+; RV64-NEXT:    vmor.mm v9, v9, v12
+; RV64-NEXT:    vmor.mm v9, v9, v11
+; RV64-NEXT:    vmseq.vx v8, v8, s1
+; RV64-NEXT:    vmor.mm v8, v9, v8
+; RV64-NEXT:    vmand.mm v0, v8, v0
+; RV64-NEXT:    ld ra, 104(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s0, 96(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s1, 88(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s2, 80(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s3, 72(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s4, 64(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s5, 56(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s6, 48(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s7, 40(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s8, 32(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s9, 24(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s10, 16(sp) # 8-byte Folded Reload
+; RV64-NEXT:    ld s11, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT:    .cfi_restore ra
+; RV64-NEXT:    .cfi_restore s0
+; RV64-NEXT:    .cfi_restore s1
+; RV64-NEXT:    .cfi_restore s2
+; RV64-NEXT:    .cfi_restore s3
+; RV64-NEXT:    .cfi_restore s4
+; RV64-NEXT:    .cfi_restore s5
+; RV64-NEXT:    .cfi_restore s6
+; RV64-NEXT:    .cfi_restore s7
+; RV64-NEXT:    .cfi_restore s8
+; RV64-NEXT:    .cfi_restore s9
+; RV64-NEXT:    .cfi_restore s10
+; RV64-NEXT:    .cfi_restore s11
+; RV64-NEXT:    addi sp, sp, 112
+; RV64-NEXT:    .cfi_def_cfa_offset 0
+; RV64-NEXT:    ret
+  %r = tail call <16 x i1> @llvm.experimental.vector.match(<16 x i8> %op1, <32 x i8> %op2, <16 x i1> %mask)
+  ret <16 x i1> %r
+}
+
+define <vscale x 4 x i1> @match_nxv4xi32_v4i32(<vscale x 4 x i32> %op1, <4 x i32> %op2, <vscale x 4 x i1> %mask) {
+; CHECK-LABEL: match_nxv4xi32_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vslidedown.vi v11, v10, 1
+; CHECK-NEXT:    vslidedown.vi v12, v10, 2
+; CHECK-NEXT:    vslidedown.vi v10, v10, 3
+; CHECK-NEXT:    vmv.x.s a1, v11
+; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vmseq.vx v11, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v12
+; CHECK-NEXT:    vmseq.vx v12, v8, a1
+; CHECK-NEXT:    vmv.x.s a1, v10
+; CHECK-NEXT:    vmseq.vx v10, v8, a0
+; CHECK-NEXT:    vmor.mm v11, v11, v12
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vmseq.vx v11, v8, a1
+; CHECK-NEXT:    vmor.mm v8, v10, v11
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <vscale x 4 x i1> @llvm.experimental.vector.match(<vscale x 4 x i32> %op1, <4 x i32> %op2, <vscale x 4 x i1> %mask)
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 2 x i1> @match_nxv2xi64_v2i64(<vscale x 2 x i64> %op1, <2 x i64> %op2, <vscale x 2 x i1> %mask) {
+; RV32-LABEL: match_nxv2xi64_v2i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vslidedown.vi v11, v10, 1
+; RV32-NEXT:    addi a2, sp, 8
+; RV32-NEXT:    vsrl.vx v10, v10, a1
+; RV32-NEXT:    vmv.x.s a3, v11
+; RV32-NEXT:    vsrl.vx v11, v11, a1
+; RV32-NEXT:    vmv.x.s a1, v10
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    vmv.x.s a0, v11
+; RV32-NEXT:    sw a3, 0(sp)
+; RV32-NEXT:    sw a0, 4(sp)
+; RV32-NEXT:    mv a0, sp
+; RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a2), zero
+; RV32-NEXT:    vlse64.v v12, (a0), zero
+; RV32-NEXT:    vmseq.vv v14, v8, v10
+; RV32-NEXT:    vmseq.vv v10, v8, v12
+; RV32-NEXT:    vmor.mm v8, v14, v10
+; RV32-NEXT:    vmand.mm v0, v8, v0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    .cfi_def_cfa_offset 0
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: match_nxv2xi64_v2i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    vslidedown.vi v10, v10, 1
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; RV64-NEXT:    vmseq.vx v11, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    vmseq.vx v10, v8, a0
+; RV64-NEXT:    vmor.mm v8, v11, v10
+; RV64-NEXT:    vmand.mm v0, v8, v0
+; RV64-NEXT:    ret
+  %r = tail call <vscale x 2 x i1> @llvm.experimental.vector.match(<vscale x 2 x i64> %op1, <2 x i64> %op2, <vscale x 2 x i1> %mask)
+  ret <vscale x 2 x i1> %r
+}
+
+define <4 x i1> @match_v4xi32_v4i32(<4 x i32> %op1, <4 x i32> %op2, <4 x i1> %mask) {
+; CHECK-LABEL: match_v4xi32_v4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vrgather.vi v10, v9, 1
+; CHECK-NEXT:    vrgather.vi v11, v9, 0
+; CHECK-NEXT:    vmseq.vv v10, v8, v10
+; CHECK-NEXT:    vmseq.vv v11, v8, v11
+; CHECK-NEXT:    vmor.mm v10, v11, v10
+; CHECK-NEXT:    vrgather.vi v11, v9, 2
+; CHECK-NEXT:    vrgather.vi v12, v9, 3
+; CHECK-NEXT:    vmseq.vv v9, v8, v11
+; CHECK-NEXT:    vmor.mm v9, v10, v9
+; CHECK-NEXT:    vmseq.vv v8, v8, v12
+; CHECK-NEXT:    vmor.mm v8, v9, v8
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <4 x i1> @llvm.experimental.vector.match(<4 x i32> %op1, <4 x i32> %op2, <4 x i1> %mask)
+  ret <4 x i1> %r
+}
+
+define <2 x i1> @match_v2xi64_v2i64(<2 x i64> %op1, <2 x i64> %op2, <2 x i1> %mask) {
+; CHECK-LABEL: match_v2xi64_v2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vrgather.vi v10, v9, 1
+; CHECK-NEXT:    vrgather.vi v11, v9, 0
+; CHECK-NEXT:    vmseq.vv v9, v8, v10
+; CHECK-NEXT:    vmseq.vv v8, v8, v11
+; CHECK-NEXT:    vmor.mm v8, v8, v9
+; CHECK-NEXT:    vmand.mm v0, v8, v0
+; CHECK-NEXT:    ret
+  %r = tail call <2 x i1> @llvm.experimental.vector.match(<2 x i64> %op1, <2 x i64> %op2, <2 x i1> %mask)
+  ret <2 x i1> %r
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll
new file mode 100644
index 00000000000000..1eef183db21bb3
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-extract-last-active.ll
@@ -0,0 +1,378 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
+; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvfh -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV32
+; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvfh -verify-machineinstrs | FileCheck %s -check-prefixes=CHECK,RV64
+
+define i8 @extract_last_i8(<16 x i8> %data, <16 x i8> %mask, i8 %passthru) {
+; CHECK-LABEL: extract_last_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vcpop.m a1, v0
+; CHECK-NEXT:    vid.v v9, v0.t
+; CHECK-NEXT:    beqz a1, .LBB0_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vredmaxu.vs v9, v9, v9
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    andi a0, a0, 255
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:  .LBB0_2:
+; CHECK-NEXT:    ret
+  %notzero = icmp ne <16 x i8> %mask, zeroinitializer
+  %res = call i8 @llvm.experimental.vector.extract.last.active.v16i8(<16 x i8> %data, <16 x i1> %notzero, i8 %passthru)
+  ret i8 %res
+}
+
+define i16 @extract_last_i16(<8 x i16> %data, <8 x i16> %mask, i16 %passthru) {
+; CHECK-LABEL: extract_last_i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vcpop.m a1, v0
+; CHECK-NEXT:    vid.v v9, v0.t
+; CHECK-NEXT:    beqz a1, .LBB1_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vredmaxu.vs v9, v9, v9
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    andi a0, a0, 255
+; CHECK-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:  .LBB1_2:
+; CHECK-NEXT:    ret
+  %notzero = icmp ne <8 x i16> %mask, zeroinitializer
+  %res = call i16 @llvm.experimental.vector.extract.last.active.v8i16(<8 x i16> %data, <8 x i1> %notzero, i16 %passthru)
+  ret i16 %res
+}
+
+define i32 @extract_last_i32(<4 x i32> %data, <4 x i32> %mask, i32 %passthru) {
+; CHECK-LABEL: extract_last_i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vcpop.m a1, v0
+; CHECK-NEXT:    vid.v v9, v0.t
+; CHECK-NEXT:    beqz a1, .LBB2_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vredmaxu.vs v9, v9, v9
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    andi a0, a0, 255
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:  .LBB2_2:
+; CHECK-NEXT:    ret
+  %notzero = icmp ne <4 x i32> %mask, zeroinitializer
+  %res = call i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32> %data, <4 x i1> %notzero, i32 %passthru)
+  ret i32 %res
+}
+
+define i64 @extract_last_i64(<2 x i64> %data, <2 x i64> %mask, i64 %passthru) {
+; RV32-LABEL: extract_last_i64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vmsne.vi v0, v9, 0
+; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; RV32-NEXT:    vmv.v.i v9, 0
+; RV32-NEXT:    vcpop.m a2, v0
+; RV32-NEXT:    vid.v v9, v0.t
+; RV32-NEXT:    beqz a2, .LBB3_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    vredmaxu.vs v9, v9, v9
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vmv.x.s a0, v9
+; RV32-NEXT:    andi a0, a0, 255
+; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV32-NEXT:    vslidedown.vx v8, v8, a0
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:  .LBB3_2:
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extract_last_i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmsne.vi v0, v9, 0
+; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; RV64-NEXT:    vmv.v.i v9, 0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vid.v v9, v0.t
+; RV64-NEXT:    beqz a1, .LBB3_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    vredmaxu.vs v9, v9, v9
+; RV64-NEXT:    vmv.x.s a0, v9
+; RV64-NEXT:    andi a0, a0, 255
+; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; RV64-NEXT:    vslidedown.vx v8, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:  .LBB3_2:
+; RV64-NEXT:    ret
+  %notzero = icmp ne <2 x i64> %mask, zeroinitializer
+  %res = call i64 @llvm.experimental.vector.extract.last.active.v2i64(<2 x i64> %data, <2 x i1> %notzero, i64 %passthru)
+  ret i64 %res
+}
+
+define float @extract_last_float(<4 x float> %data, <4 x i32> %mask, float %passthru) {
+; CHECK-LABEL: extract_last_float:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    vid.v v9, v0.t
+; CHECK-NEXT:    beqz a0, .LBB4_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vredmaxu.vs v9, v9, v9
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    andi a0, a0, 255
+; CHECK-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:  .LBB4_2:
+; CHECK-NEXT:    ret
+  %notzero = icmp ne <4 x i32> %mask, zeroinitializer
+  %res = call float @llvm.experimental.vector.extract.last.active.v4f32(<4 x float> %data, <4 x i1> %notzero, float %passthru)
+  ret float %res
+}
+
+define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %passthru) {
+; CHECK-LABEL: extract_last_double:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vmsne.vi v0, v9, 0
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    vid.v v9, v0.t
+; CHECK-NEXT:    beqz a0, .LBB5_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vredmaxu.vs v9, v9, v9
+; CHECK-NEXT:    vmv.x.s a0, v9
+; CHECK-NEXT:    andi a0, a0, 255
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:  .LBB5_2:
+; CHECK-NEXT:    ret
+  %notzero = icmp ne <2 x i64> %mask, zeroinitializer
+  %res = call double @llvm.experimental.vector.extract.last.active.v2f64(<2 x double> %data, <2 x i1> %notzero, double %passthru)
+  ret double %res
+}
+
+define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru) {
+; CHECK-LABEL: extract_last_i8_scalable:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vcpop.m a1, v0
+; CHECK-NEXT:    vid.v v10, v0.t
+; CHECK-NEXT:    beqz a1, .LBB6_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vredmaxu.vs v10, v10, v10
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    andi a0, a0, 255
+; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vmv.x.s a0, v8
+; CHECK-NEXT:  .LBB6_2:
+; CHECK-NEXT:    ret
+  %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
+  ret i8 %res
+}
+
+define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru) {
+; RV32-LABEL: extract_last_i16_scalable:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
+; RV32-NEXT:    vmv.v.i v10, 0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vid.v v10, v0.t
+; RV32-NEXT:    beqz a1, .LBB7_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    vredmaxu.vs v10, v10, v10
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    slli a0, a0, 16
+; RV32-NEXT:    srli a0, a0, 16
+; RV32-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT:    vslidedown.vx v8, v8, a0
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:  .LBB7_2:
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extract_last_i16_scalable:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
+; RV64-NEXT:    vmv.v.i v10, 0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vid.v v10, v0.t
+; RV64-NEXT:    beqz a1, .LBB7_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    vredmaxu.vs v10, v10, v10
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    slli a0, a0, 48
+; RV64-NEXT:    srli a0, a0, 48
+; RV64-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT:    vslidedown.vx v8, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:  .LBB7_2:
+; RV64-NEXT:    ret
+  %res = call i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru)
+  ret i16 %res
+}
+
+define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru) {
+; RV32-LABEL: extract_last_i32_scalable:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vmv.v.i v10, 0
+; RV32-NEXT:    vcpop.m a1, v0
+; RV32-NEXT:    vid.v v10, v0.t
+; RV32-NEXT:    beqz a1, .LBB8_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    vredmaxu.vs v10, v10, v10
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vx v8, v8, a0
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:  .LBB8_2:
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extract_last_i32_scalable:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV64-NEXT:    vmv.v.i v10, 0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vid.v v10, v0.t
+; RV64-NEXT:    beqz a1, .LBB8_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    vredmaxu.vs v10, v10, v10
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vx v8, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:  .LBB8_2:
+; RV64-NEXT:    ret
+  %res = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru)
+  ret i32 %res
+}
+
+define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru) {
+; RV32-LABEL: extract_last_i64_scalable:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a2, zero, e64, m2, ta, mu
+; RV32-NEXT:    vmv.v.i v10, 0
+; RV32-NEXT:    vcpop.m a2, v0
+; RV32-NEXT:    vid.v v10, v0.t
+; RV32-NEXT:    beqz a2, .LBB9_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    vredmaxu.vs v10, v10, v10
+; RV32-NEXT:    li a1, 32
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV32-NEXT:    vslidedown.vx v8, v8, a0
+; RV32-NEXT:    vmv.x.s a0, v8
+; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT:    vsrl.vx v8, v8, a1
+; RV32-NEXT:    vmv.x.s a1, v8
+; RV32-NEXT:  .LBB9_2:
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extract_last_i64_scalable:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
+; RV64-NEXT:    vmv.v.i v10, 0
+; RV64-NEXT:    vcpop.m a1, v0
+; RV64-NEXT:    vid.v v10, v0.t
+; RV64-NEXT:    beqz a1, .LBB9_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    vredmaxu.vs v10, v10, v10
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; RV64-NEXT:    vslidedown.vx v8, v8, a0
+; RV64-NEXT:    vmv.x.s a0, v8
+; RV64-NEXT:  .LBB9_2:
+; RV64-NEXT:    ret
+  %res = call i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru)
+  ret i64 %res
+}
+
+define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru) {
+; RV32-LABEL: extract_last_float_scalable:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV32-NEXT:    vmv.v.i v10, 0
+; RV32-NEXT:    vcpop.m a0, v0
+; RV32-NEXT:    vid.v v10, v0.t
+; RV32-NEXT:    beqz a0, .LBB10_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    vredmaxu.vs v10, v10, v10
+; RV32-NEXT:    vmv.x.s a0, v10
+; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV32-NEXT:    vslidedown.vx v8, v8, a0
+; RV32-NEXT:    vfmv.f.s fa0, v8
+; RV32-NEXT:  .LBB10_2:
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: extract_last_float_scalable:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
+; RV64-NEXT:    vmv.v.i v10, 0
+; RV64-NEXT:    vcpop.m a0, v0
+; RV64-NEXT:    vid.v v10, v0.t
+; RV64-NEXT:    beqz a0, .LBB10_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    vredmaxu.vs v10, v10, v10
+; RV64-NEXT:    vmv.x.s a0, v10
+; RV64-NEXT:    slli a0, a0, 32
+; RV64-NEXT:    srli a0, a0, 32
+; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
+; RV64-NEXT:    vslidedown.vx v8, v8, a0
+; RV64-NEXT:    vfmv.f.s fa0, v8
+; RV64-NEXT:  .LBB10_2:
+; RV64-NEXT:    ret
+  %res = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru)
+  ret float %res
+}
+
+define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru) {
+; CHECK-LABEL: extract_last_double_scalable:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
+; CHECK-NEXT:    vmv.v.i v10, 0
+; CHECK-NEXT:    vcpop.m a0, v0
+; CHECK-NEXT:    vid.v v10, v0.t
+; CHECK-NEXT:    beqz a0, .LBB11_2
+; CHECK-NEXT:  # %bb.1:
+; CHECK-NEXT:    vredmaxu.vs v10, v10, v10
+; CHECK-NEXT:    vmv.x.s a0, v10
+; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vslidedown.vx v8, v8, a0
+; CHECK-NEXT:    vfmv.f.s fa0, v8
+; CHECK-NEXT:  .LBB11_2:
+; CHECK-NEXT:    ret
+  %res = call double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru)
+  ret double %res
+}
+
+declare i8 @llvm.experimental.vector.extract.last.active.v16i8(<16 x i8>, <16 x i1>, i8)
+declare i16 @llvm.experimental.vector.extract.last.active.v8i16(<8 x i16>, <8 x i1>, i16)
+declare i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32>, <4 x i1>, i32)
+declare i64 @llvm.experimental.vector.extract.last.active.v2i64(<2 x i64>, <2 x i1>, i64)
+declare float @llvm.experimental.vector.extract.last.active.v4f32(<4 x float>, <4 x i1>, float)
+declare double @llvm.experimental.vector.extract.last.active.v2f64(<2 x double>, <2 x i1>, double)
+declare i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
+declare i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
+declare i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+declare float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float)
+declare double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double)
