[llvm-branch-commits] [llvm] 9c6a00f - [RISCV] Add ISel patterns for scalable mask exts & truncs

Fraser Cormack via llvm-branch-commits llvm-branch-commits at lists.llvm.org
Tue Jan 19 10:24:01 PST 2021


Author: Fraser Cormack
Date: 2021-01-19T18:13:15Z
New Revision: 9c6a00fe99c4bbe329dd1933515f1a1a430fd5d7

URL: https://github.com/llvm/llvm-project/commit/9c6a00fe99c4bbe329dd1933515f1a1a430fd5d7
DIFF: https://github.com/llvm/llvm-project/commit/9c6a00fe99c4bbe329dd1933515f1a1a430fd5d7.diff

LOG: [RISCV] Add ISel patterns for scalable mask exts & truncs

Original patch by @rogfer01.

This patch adds support for sign-, zero-, and any-extension from
scalable mask vector types to integer vector types, as well as
truncation in the opposite direction.

Authored-by: Roger Ferrer Ibanez <rofirrim at gmail.com>
Co-Authored-by: Fraser Cormack <fraser at codeplay.com>

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D94590
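
For illustration, the elementwise semantics of the new lowerings can be
sketched in standalone C++ (plain scalars standing in for vector lanes;
the function names below are illustrative, not LLVM API):

  #include <cassert>
  #include <cstdint>

  // Extension from a mask lane: zero/any-extension uses 1 and
  // sign-extension uses -1 as the "true" value, mirroring
  // (vXiN = vselect vmask, ExtTrueVal, 0).
  int64_t extendMaskLane(bool Mask, int64_t ExtTrueVal) {
    return Mask ? ExtTrueVal : 0;
  }

  // Truncation to a mask lane: keep only bit 0 and compare it against
  // zero, mirroring (vXi1 = setcc (and vec, 1), 0, ne).
  bool truncToMaskLane(int64_t Lane) {
    return (Lane & 1) != 0;
  }

  int main() {
    assert(extendMaskLane(true, -1) == -1); // sext of a set lane -> all-ones
    assert(extendMaskLane(true, 1) == 1);   // zext of a set lane -> 1
    assert(extendMaskLane(false, -1) == 0); // any extension of a clear lane -> 0
    assert(truncToMaskLane(3));             // odd lane truncates to true
    assert(!truncToMaskLane(2));            // even lane truncates to false
    return 0;
  }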

Added: 
    llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/lib/Target/RISCV/RISCVISelLowering.h

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 14795b5465be..8ca30d654421 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -373,9 +373,18 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::UMIN, VT, Legal);
       setOperationAction(ISD::UMAX, VT, Legal);
 
-      // Lower RVV truncates as a series of "RISCVISD::TRUNCATE_VECTOR"
-      // nodes which truncate by one power of two at a time.
-      setOperationAction(ISD::TRUNCATE, VT, Custom);
+      if (isTypeLegal(VT)) {
+        // Custom-lower extensions and truncations from/to mask types.
+        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
+        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
+        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);
+
+        // We custom-lower all legally-typed vector truncates:
+        // 1. Mask VTs are custom-expanded into a series of standard nodes
+        // 2. Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR"
+        // nodes which truncate by one power of two at a time.
+        setOperationAction(ISD::TRUNCATE, VT, Custom);
+      }
     }
 
     // We must custom-lower SPLAT_VECTOR vXi64 on RV32
@@ -690,15 +699,19 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                        DAG.getTargetConstant(Imm, DL, Subtarget.getXLenVT()));
   }
   case ISD::TRUNCATE: {
-    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
-    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR" nodes which
-    // truncate by one power of two at a time.
     SDLoc DL(Op);
     EVT VT = Op.getValueType();
-    // Only custom-lower non-mask truncates
-    if (!VT.isVector() || VT.getVectorElementType() == MVT::i1)
+    // Only custom-lower vector truncates
+    if (!VT.isVector())
       return Op;
 
+    // Truncates to mask types are handled differently
+    if (VT.getVectorElementType() == MVT::i1)
+      return lowerVectorMaskTrunc(Op, DAG);
+
+    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
+    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR" nodes which
+    // truncate by one power of two at a time.
     EVT DstEltVT = VT.getVectorElementType();
 
     SDValue Src = Op.getOperand(0);
@@ -721,6 +734,11 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
 
     return Result;
   }
+  case ISD::ANY_EXTEND:
+  case ISD::ZERO_EXTEND:
+    return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
+  case ISD::SIGN_EXTEND:
+    return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
   case ISD::SPLAT_VECTOR:
     return lowerSPLATVECTOR(Op, DAG);
   case ISD::VSCALE: {
@@ -1198,6 +1216,76 @@ SDValue RISCVTargetLowering::lowerSPLATVECTOR(SDValue Op,
   return DAG.getNode(ISD::OR, DL, VecVT, Lo, Hi);
 }
 
+// Custom-lower extensions from mask vectors by using a vselect either with 1
+// for zero/any-extension or -1 for sign-extension:
+//   (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
+// Note that any-extension is lowered identically to zero-extension.
+SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
+                                                int64_t ExtTrueVal) const {
+  SDLoc DL(Op);
+  EVT VecVT = Op.getValueType();
+  SDValue Src = Op.getOperand(0);
+  // Only custom-lower extensions from mask types
+  if (!Src.getValueType().isVector() ||
+      Src.getValueType().getVectorElementType() != MVT::i1)
+    return Op;
+
+  // Be careful not to introduce illegal scalar types at this stage, and be
+  // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
+  // illegal and must be expanded. Since we know that the constants are
+  // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
+  bool IsRV32E64 =
+      !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
+  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+  SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, Subtarget.getXLenVT());
+
+  if (!IsRV32E64) {
+    SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
+    SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
+  } else {
+    SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
+    SplatTrueVal =
+        DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
+  }
+
+  return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
+}
+
+// Custom-lower truncations from vectors to mask vectors by using a mask and a
+// setcc operation:
+//   (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
+SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
+                                                  SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  EVT MaskVT = Op.getValueType();
+  // Only expect to custom-lower truncations to mask types
+  assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
+         "Unexpected type for vector mask lowering");
+  SDValue Src = Op.getOperand(0);
+  EVT VecVT = Src.getValueType();
+
+  // Be careful not to introduce illegal scalar types at this stage, and be
+  // careful also about splatting constants as on RV32, vXi64 SPLAT_VECTOR is
+  // illegal and must be expanded. Since we know that the constants are
+  // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
+  bool IsRV32E64 =
+      !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
+  SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
+  SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
+
+  if (!IsRV32E64) {
+    SplatOne = DAG.getSplatVector(VecVT, DL, SplatOne);
+    SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
+  } else {
+    SplatOne = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatOne);
+    SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
+  }
+
+  SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
+
+  return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
+}
+
 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                      SelectionDAG &DAG) const {
   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
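
As the comments above note, RVV truncates only narrow from SEW*2 to SEW,
so a wide integer truncate is emitted as a chain of halving steps. A
minimal sketch of just that width arithmetic (plain C++, not LLVM code;
the function name is illustrative):

  #include <cstdio>

  // Print the element widths an RVV truncate chain visits, halving the
  // source width until the destination width is reached (both widths
  // are powers of two).
  void printTruncChain(unsigned SrcBits, unsigned DstBits) {
    printf("%u", SrcBits);
    for (unsigned W = SrcBits / 2; W >= DstBits; W /= 2)
      printf(" -> %u", W);
    printf("\n");
  }

  int main() {
    printTruncChain(64, 8); // prints: 64 -> 32 -> 16 -> 8
    return 0;
  }

An i64-to-i8 vector truncate therefore becomes three
RISCVISD::TRUNCATE_VECTOR nodes.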

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index e5bdc75a619e..b5b6077d5ccb 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -288,6 +288,9 @@ class RISCVTargetLowering : public TargetLowering {
   SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
   SDValue lowerSPLATVECTOR(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
+                             int64_t ExtTrueVal) const;
+  SDValue lowerVectorMaskTrunc(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
   SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;
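
One further note on the RV32 caveat in the comments above: the lowerings
can use SPLAT_VECTOR_I64 because every constant they splat (0, 1, and -1)
is a sign-extended 32-bit value. A small standalone C++ check of that
property (illustrative only, not part of the patch):

  #include <cassert>
  #include <cstdint>

  // True when V is representable as a sign-extended 32-bit scalar, the
  // precondition for materializing an i64 splat from one 32-bit operand.
  bool fitsSExt32(int64_t V) {
    return V == static_cast<int64_t>(static_cast<int32_t>(V));
  }

  int main() {
    assert(fitsSExt32(0) && fitsSExt32(1) && fitsSExt32(-1));
    assert(!fitsSExt32(int64_t{1} << 32)); // needs more than 32 bits
    return 0;
  }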
 

diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll
new file mode 100644
index 000000000000..d6b1150c9f35
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv32.ll
@@ -0,0 +1,729 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @sext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i8> @zext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i8_nxv1i1(<vscale x 1 x i8> %v) {
+; CHECK-LABEL: trunc_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i8> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i8> @sext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i8> @zext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i8_nxv2i1(<vscale x 2 x i8> %v) {
+; CHECK-LABEL: trunc_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i8> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i8> @sext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i8> @zext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i8_nxv4i1(<vscale x 4 x i8> %v) {
+; CHECK-LABEL: trunc_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i8> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i8> @sext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i8> @zext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i8_nxv8i1(<vscale x 8 x i8> %v) {
+; CHECK-LABEL: trunc_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i8> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i8> @sext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i8> @zext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i8_nxv16i1(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: trunc_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i8> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i8> @sext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i8> @zext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i8_nxv32i1(<vscale x 32 x i8> %v) {
+; CHECK-LABEL: trunc_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i8> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 64 x i8> @sext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: sext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i8> @zext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: zext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i1> @trunc_nxv64i8_nxv64i1(<vscale x 64 x i8> %v) {
+; CHECK-LABEL: trunc_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 64 x i8> %v to <vscale x 64 x i1>
+  ret <vscale x 64 x i1> %r
+}
+
+define <vscale x 1 x i16> @sext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i16> @zext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i16_nxv1i1(<vscale x 1 x i16> %v) {
+; CHECK-LABEL: trunc_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i16> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i16> @sext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i16> @zext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i16_nxv2i1(<vscale x 2 x i16> %v) {
+; CHECK-LABEL: trunc_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i16> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i16> @sext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i16> @zext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i16_nxv4i1(<vscale x 4 x i16> %v) {
+; CHECK-LABEL: trunc_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i16> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i16> @sext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i16> @zext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i16_nxv8i1(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: trunc_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i16> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i16> @sext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i16> @zext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i16_nxv16i1(<vscale x 16 x i16> %v) {
+; CHECK-LABEL: trunc_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i16> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i16> @sext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i16> @zext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i16_nxv32i1(<vscale x 32 x i16> %v) {
+; CHECK-LABEL: trunc_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i16> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 1 x i32> @sext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i32> @zext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i32_nxv1i1(<vscale x 1 x i32> %v) {
+; CHECK-LABEL: trunc_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i32> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i32> @sext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i32> @zext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i32_nxv2i1(<vscale x 2 x i32> %v) {
+; CHECK-LABEL: trunc_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i32> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i32> @sext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i32> @zext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i32_nxv4i1(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: trunc_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i32> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i32> @sext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i32> @zext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i32_nxv8i1(<vscale x 8 x i32> %v) {
+; CHECK-LABEL: trunc_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i32> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i32> @sext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i32> @zext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i32_nxv16i1(<vscale x 16 x i32> %v) {
+; CHECK-LABEL: trunc_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i32> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 1 x i64> @sext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i64> @zext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i64_nxv1i1(<vscale x 1 x i64> %v) {
+; CHECK-LABEL: trunc_nxv1i64_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i64> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i64> @sext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @zext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i64_nxv2i1(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: trunc_nxv2i64_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i64> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i64> @sext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i64> @zext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i64_nxv4i1(<vscale x 4 x i64> %v) {
+; CHECK-LABEL: trunc_nxv4i64_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i64> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i64> @sext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i64> @zext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i64_nxv8i1(<vscale x 8 x i64> %v) {
+; CHECK-LABEL: trunc_nxv8i64_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i64> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+

diff --git a/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll
new file mode 100644
index 000000000000..d2264cbf0d2c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/mask-exts-truncs-rv64.ll
@@ -0,0 +1,729 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 1 x i8> @sext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i8> @zext_nxv1i1_nxv1i8(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i8>
+  ret <vscale x 1 x i8> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i8_nxv1i1(<vscale x 1 x i8> %v) {
+; CHECK-LABEL: trunc_nxv1i8_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf8,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i8> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i8> @sext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i8> @zext_nxv2i1_nxv2i8(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i8>
+  ret <vscale x 2 x i8> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i8_nxv2i1(<vscale x 2 x i8> %v) {
+; CHECK-LABEL: trunc_nxv2i8_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i8> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i8> @sext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i8> @zext_nxv4i1_nxv4i8(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i8>
+  ret <vscale x 4 x i8> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i8_nxv4i1(<vscale x 4 x i8> %v) {
+; CHECK-LABEL: trunc_nxv4i8_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i8> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i8> @sext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i8> @zext_nxv8i1_nxv8i8(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i8>
+  ret <vscale x 8 x i8> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i8_nxv8i1(<vscale x 8 x i8> %v) {
+; CHECK-LABEL: trunc_nxv8i8_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i8> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i8> @sext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i8> @zext_nxv16i1_nxv16i8(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i8>
+  ret <vscale x 16 x i8> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i8_nxv16i1(<vscale x 16 x i8> %v) {
+; CHECK-LABEL: trunc_nxv16i8_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i8> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i8> @sext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i8> @zext_nxv32i1_nxv32i8(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i8>
+  ret <vscale x 32 x i8> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i8_nxv32i1(<vscale x 32 x i8> %v) {
+; CHECK-LABEL: trunc_nxv32i8_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i8> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 64 x i8> @sext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: sext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i8> @zext_nxv64i1_nxv64i8(<vscale x 64 x i1> %v) {
+; CHECK-LABEL: zext_nxv64i1_nxv64i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 64 x i1> %v to <vscale x 64 x i8>
+  ret <vscale x 64 x i8> %r
+}
+
+define <vscale x 64 x i1> @trunc_nxv64i8_nxv64i1(<vscale x 64 x i8> %v) {
+; CHECK-LABEL: trunc_nxv64i8_nxv64i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 64 x i8> %v to <vscale x 64 x i1>
+  ret <vscale x 64 x i1> %r
+}
+
+define <vscale x 1 x i16> @sext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i16> @zext_nxv1i1_nxv1i16(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i16>
+  ret <vscale x 1 x i16> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i16_nxv1i1(<vscale x 1 x i16> %v) {
+; CHECK-LABEL: trunc_nxv1i16_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i16> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i16> @sext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i16> @zext_nxv2i1_nxv2i16(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i16>
+  ret <vscale x 2 x i16> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i16_nxv2i1(<vscale x 2 x i16> %v) {
+; CHECK-LABEL: trunc_nxv2i16_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i16> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i16> @sext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i16> @zext_nxv4i1_nxv4i16(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i16>
+  ret <vscale x 4 x i16> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i16_nxv4i1(<vscale x 4 x i16> %v) {
+; CHECK-LABEL: trunc_nxv4i16_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i16> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i16> @sext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i16> @zext_nxv8i1_nxv8i16(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i16>
+  ret <vscale x 8 x i16> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i16_nxv8i1(<vscale x 8 x i16> %v) {
+; CHECK-LABEL: trunc_nxv8i16_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i16> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i16> @sext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i16> @zext_nxv16i1_nxv16i16(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i16>
+  ret <vscale x 16 x i16> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i16_nxv16i1(<vscale x 16 x i16> %v) {
+; CHECK-LABEL: trunc_nxv16i16_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i16> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 32 x i16> @sext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: sext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i16> @zext_nxv32i1_nxv32i16(<vscale x 32 x i1> %v) {
+; CHECK-LABEL: zext_nxv32i1_nxv32i16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 32 x i1> %v to <vscale x 32 x i16>
+  ret <vscale x 32 x i16> %r
+}
+
+define <vscale x 32 x i1> @trunc_nxv32i16_nxv32i1(<vscale x 32 x i16> %v) {
+; CHECK-LABEL: trunc_nxv32i16_nxv32i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 32 x i16> %v to <vscale x 32 x i1>
+  ret <vscale x 32 x i1> %r
+}
+
+define <vscale x 1 x i32> @sext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i32> @zext_nxv1i1_nxv1i32(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i32>
+  ret <vscale x 1 x i32> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i32_nxv1i1(<vscale x 1 x i32> %v) {
+; CHECK-LABEL: trunc_nxv1i32_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i32> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i32> @sext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i32> @zext_nxv2i1_nxv2i32(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i32>
+  ret <vscale x 2 x i32> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i32_nxv2i1(<vscale x 2 x i32> %v) {
+; CHECK-LABEL: trunc_nxv2i32_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i32> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i32> @sext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i32> @zext_nxv4i1_nxv4i32(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i32>
+  ret <vscale x 4 x i32> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i32_nxv4i1(<vscale x 4 x i32> %v) {
+; CHECK-LABEL: trunc_nxv4i32_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i32> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i32> @sext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i32> @zext_nxv8i1_nxv8i32(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i32>
+  ret <vscale x 8 x i32> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i32_nxv8i1(<vscale x 8 x i32> %v) {
+; CHECK-LABEL: trunc_nxv8i32_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i32> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
+define <vscale x 16 x i32> @sext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: sext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i32> @zext_nxv16i1_nxv16i32(<vscale x 16 x i1> %v) {
+; CHECK-LABEL: zext_nxv16i1_nxv16i32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 16 x i1> %v to <vscale x 16 x i32>
+  ret <vscale x 16 x i32> %r
+}
+
+define <vscale x 16 x i1> @trunc_nxv16i32_nxv16i1(<vscale x 16 x i32> %v) {
+; CHECK-LABEL: trunc_nxv16i32_nxv16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 16 x i32> %v to <vscale x 16 x i1>
+  ret <vscale x 16 x i1> %r
+}
+
+define <vscale x 1 x i64> @sext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: sext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i64> @zext_nxv1i1_nxv1i64(<vscale x 1 x i1> %v) {
+; CHECK-LABEL: zext_nxv1i1_nxv1i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 0
+; CHECK-NEXT:    vmerge.vim v16, v25, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 1 x i1> %v to <vscale x 1 x i64>
+  ret <vscale x 1 x i64> %r
+}
+
+define <vscale x 1 x i1> @trunc_nxv1i64_nxv1i1(<vscale x 1 x i64> %v) {
+; CHECK-LABEL: trunc_nxv1i64_nxv1i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT:    vand.vi v25, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v25, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 1 x i64> %v to <vscale x 1 x i1>
+  ret <vscale x 1 x i1> %r
+}
+
+define <vscale x 2 x i64> @sext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: sext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i64> @zext_nxv2i1_nxv2i64(<vscale x 2 x i1> %v) {
+; CHECK-LABEL: zext_nxv2i1_nxv2i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vmv.v.i v26, 0
+; CHECK-NEXT:    vmerge.vim v16, v26, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 2 x i1> %v to <vscale x 2 x i64>
+  ret <vscale x 2 x i64> %r
+}
+
+define <vscale x 2 x i1> @trunc_nxv2i64_nxv2i1(<vscale x 2 x i64> %v) {
+; CHECK-LABEL: trunc_nxv2i64_nxv2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT:    vand.vi v26, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v26, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 2 x i64> %v to <vscale x 2 x i1>
+  ret <vscale x 2 x i1> %r
+}
+
+define <vscale x 4 x i64> @sext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: sext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i64> @zext_nxv4i1_nxv4i64(<vscale x 4 x i1> %v) {
+; CHECK-LABEL: zext_nxv4i1_nxv4i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vmv.v.i v28, 0
+; CHECK-NEXT:    vmerge.vim v16, v28, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 4 x i1> %v to <vscale x 4 x i64>
+  ret <vscale x 4 x i64> %r
+}
+
+define <vscale x 4 x i1> @trunc_nxv4i64_nxv4i1(<vscale x 4 x i64> %v) {
+; CHECK-LABEL: trunc_nxv4i64_nxv4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT:    vand.vi v28, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v28, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 4 x i64> %v to <vscale x 4 x i1>
+  ret <vscale x 4 x i1> %r
+}
+
+define <vscale x 8 x i64> @sext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: sext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, -1, v0
+; CHECK-NEXT:    ret
+  %r = sext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i64> @zext_nxv8i1_nxv8i64(<vscale x 8 x i1> %v) {
+; CHECK-LABEL: zext_nxv8i1_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT:    ret
+  %r = zext <vscale x 8 x i1> %v to <vscale x 8 x i64>
+  ret <vscale x 8 x i64> %r
+}
+
+define <vscale x 8 x i1> @trunc_nxv8i64_nxv8i1(<vscale x 8 x i64> %v) {
+; CHECK-LABEL: trunc_nxv8i64_nxv8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vand.vi v8, v16, 1
+; CHECK-NEXT:    vmsne.vi v0, v8, 0
+; CHECK-NEXT:    ret
+  %r = trunc <vscale x 8 x i64> %v to <vscale x 8 x i1>
+  ret <vscale x 8 x i1> %r
+}
+
