[llvm] 4060b81 - [RISCV] Obey -riscv-v-fixed-length-vector-elen-max when lowering mask BUILD_VECTORs.

Craig Topper via llvm-commits llvm-commits at lists.llvm.org
Wed Jan 19 10:48:41 PST 2022


Author: Craig Topper
Date: 2022-01-19T10:47:37-08:00
New Revision: 4060b81e76e6e50896f97f44022954c7e214490a

URL: https://github.com/llvm/llvm-project/commit/4060b81e76e6e50896f97f44022954c7e214490a
DIFF: https://github.com/llvm/llvm-project/commit/4060b81e76e6e50896f97f44022954c7e214490a.diff

LOG: [RISCV] Obey -riscv-v-fixed-length-vector-elen-max when lowering mask BUILD_VECTORs.

We may not be allowed to use vXiXLen vectors when XLen exceeds the
maximum ELEN. Consult the ELEN limit to determine the widest element
type that is allowed. This will become even more important when Zve32
is added.

Reviewed By: frasercrmck, arcbbb

Differential Revision: https://reviews.llvm.org/D117518
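
To make the effect of the clamp concrete, here is a minimal,
self-contained sketch of the arithmetic (the helper name
viaIntegerBits and the hard-coded scenarios are illustrative; the
real code reads NumElts, XLen, and the ELEN cap from the subtarget):

#include <algorithm>
#include <cstdio>

// Hypothetical standalone model of the clamp added to lowerBUILD_VECTOR.
// NumElts is the i1 element count, XLen is 32 or 64, and MaxELEN is the
// value of -riscv-v-fixed-length-vector-elen-max.
static unsigned viaIntegerBits(unsigned NumElts, unsigned XLen,
                               unsigned MaxELEN) {
  // Pre-existing clamp: at least 8 bits, at most XLEN bits per piece.
  unsigned Bits = std::min(std::max(NumElts, 8u), XLen);
  // The new clamp: a piece may never be wider than the fixed-length ELEN.
  return std::min(Bits, MaxELEN);
}

int main() {
  // v16i1 on RV32: one i16 piece while ELEN >= 16, i8 pieces at ELEN=8.
  std::printf("%u\n", viaIntegerBits(16, 32, 32)); // 16
  std::printf("%u\n", viaIntegerBits(16, 32, 8));  // 8
  // v32i1 on RV32: one i32 piece at ELEN=32, i16 pieces at ELEN=16.
  std::printf("%u\n", viaIntegerBits(32, 32, 32)); // 32
  std::printf("%u\n", viaIntegerBits(32, 32, 16)); // 16
  return 0;
}

These scenarios match the new check prefixes in the test diff below:
for example, the v32i1 ELEN16 checks build the mask from two 16-bit
pieces joined with vslideup.vi.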

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVISelLowering.cpp
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 9f720b674bfba..4e4236678402d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1929,6 +1929,8 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
     // codegen across RV32 and RV64.
     unsigned NumViaIntegerBits =
         std::min(std::max(NumElts, 8u), Subtarget.getXLen());
+    NumViaIntegerBits = std::min(NumViaIntegerBits,
+                                 Subtarget.getMaxELENForFixedLengthVectors());
     if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
       // If we have to use more than one INSERT_VECTOR_ELT then this
       // optimization is likely to increase code size; avoid performing it in

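The ELEN cap also changes which container types the fixed-length
lowering may use in the updated test below. RVV only guarantees
fractional LMUL down to SEW/ELEN, so capping ELEN raises the smallest
legal register group for e8 from mf8 to mf4 (ELEN=32), mf2 (ELEN=16),
and m1 (ELEN=8). A minimal sketch of that rule, assuming
LMUL >= SEW/ELEN (the helper name is hypothetical):

#include <algorithm>
#include <cstdio>
#include <initializer_list>

// Returns D such that the smallest legal LMUL for this SEW is 1/D
// (D == 1 means a full register, m1).
static unsigned smallestLMULDenominator(unsigned SEW, unsigned ELEN) {
  return std::max(ELEN / SEW, 1u);
}

int main() {
  // e8 containers in the checks: ELEN=64 -> mf8, 32 -> mf4, 16 -> mf2,
  // 8 -> m1, matching the vsetivli lines in the RV*-ELEN* prefixes.
  for (unsigned ELEN : {64u, 32u, 16u, 8u})
    std::printf("e8 @ ELEN=%u: LMUL=1/%u\n", ELEN,
                smallestLMULDenominator(8, ELEN));
  return 0;
}
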
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index f4294c78713fd..56b3e3d89cb72 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -7,6 +7,13 @@
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=4 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,RV64-LMULMAX4
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV32,RV32-LMULMAX8
 ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,CHECK-RV64,RV64-LMULMAX8
+; Test with ELEN limited
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-elen-max=32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32-ELEN,RV32-ELEN32
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-elen-max=32 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64-ELEN,RV64-ELEN32
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-elen-max=16 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32-ELEN,RV32-ELEN16
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-elen-max=16 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64-ELEN,RV64-ELEN16
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-elen-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV32-ELEN,RV32-ELEN8
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-elen-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=RV64-ELEN,RV64-ELEN8
 
 define <1 x i1> @buildvec_mask_nonconst_v1i1(i1 %x) {
 ; CHECK-LABEL: buildvec_mask_nonconst_v1i1:
@@ -16,6 +23,54 @@ define <1 x i1> @buildvec_mask_nonconst_v1i1(i1 %x) {
 ; CHECK-NEXT:    vmv.v.x v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_nonconst_v1i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    andi a0, a0, 1
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.v.x v8, a0
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_nonconst_v1i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    andi a0, a0, 1
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.v.x v8, a0
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_nonconst_v1i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    andi a0, a0, 1
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.v.x v8, a0
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_nonconst_v1i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    andi a0, a0, 1
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.v.x v8, a0
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_nonconst_v1i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    andi a0, a0, 1
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.v.x v8, a0
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_nonconst_v1i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    andi a0, a0, 1
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.v.x v8, a0
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <1 x i1> undef, i1 %x, i32 0
   ret <1 x i1> %1
 }
@@ -28,6 +83,54 @@ define <1 x i1> @buildvec_mask_optsize_nonconst_v1i1(i1 %x) optsize {
 ; CHECK-NEXT:    vmv.v.x v8, a0
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v1i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    andi a0, a0, 1
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.v.x v8, a0
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v1i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    andi a0, a0, 1
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.v.x v8, a0
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v1i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    andi a0, a0, 1
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.v.x v8, a0
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v1i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    andi a0, a0, 1
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.v.x v8, a0
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v1i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    andi a0, a0, 1
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.v.x v8, a0
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v1i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    andi a0, a0, 1
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.v.x v8, a0
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <1 x i1> undef, i1 %x, i32 0
   ret <1 x i1> %1
 }
@@ -43,6 +146,72 @@ define <2 x i1> @buildvec_mask_nonconst_v2i1(i1 %x, i1 %y) {
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_nonconst_v2i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN32-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_nonconst_v2i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN32-NEXT:    vsetvli zero, zero, e8, mf4, tu, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN32-NEXT:    vsetvli zero, zero, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_nonconst_v2i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN16-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN16-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_nonconst_v2i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN16-NEXT:    vsetvli zero, zero, e8, mf2, tu, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN16-NEXT:    vsetvli zero, zero, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_nonconst_v2i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_nonconst_v2i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <2 x i1> undef, i1 %x, i32 0
   %2 = insertelement <2 x i1> %1,  i1 %y, i32 1
   ret <2 x i1> %2
@@ -63,6 +232,90 @@ define <2 x i1> @buildvec_mask_optsize_nonconst_v2i1(i1 %x, i1 %y) optsize {
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v2i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    addi sp, sp, -16
+; RV32-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN32-NEXT:    sb a1, 15(sp)
+; RV32-ELEN32-NEXT:    sb a0, 14(sp)
+; RV32-ELEN32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    addi a0, sp, 14
+; RV32-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    addi sp, sp, 16
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v2i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    addi sp, sp, -16
+; RV64-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN32-NEXT:    sb a1, 15(sp)
+; RV64-ELEN32-NEXT:    sb a0, 14(sp)
+; RV64-ELEN32-NEXT:    vsetivli zero, 2, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    addi a0, sp, 14
+; RV64-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    addi sp, sp, 16
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v2i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    addi sp, sp, -16
+; RV32-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN16-NEXT:    sb a1, 15(sp)
+; RV32-ELEN16-NEXT:    sb a0, 14(sp)
+; RV32-ELEN16-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    addi a0, sp, 14
+; RV32-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    addi sp, sp, 16
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v2i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    addi sp, sp, -16
+; RV64-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN16-NEXT:    sb a1, 15(sp)
+; RV64-ELEN16-NEXT:    sb a0, 14(sp)
+; RV64-ELEN16-NEXT:    vsetivli zero, 2, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    addi a0, sp, 14
+; RV64-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    addi sp, sp, 16
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v2i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    addi sp, sp, -16
+; RV32-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN8-NEXT:    sb a1, 15(sp)
+; RV32-ELEN8-NEXT:    sb a0, 14(sp)
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    addi a0, sp, 14
+; RV32-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    addi sp, sp, 16
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v2i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    addi sp, sp, -16
+; RV64-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN8-NEXT:    sb a1, 15(sp)
+; RV64-ELEN8-NEXT:    sb a0, 14(sp)
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    addi a0, sp, 14
+; RV64-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    addi sp, sp, 16
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <2 x i1> undef, i1 %x, i32 0
   %2 = insertelement <2 x i1> %1,  i1 %y, i32 1
   ret <2 x i1> %2
@@ -75,6 +328,48 @@ define <3 x i1> @buildvec_mask_v1i1() {
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v1i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a0, 2
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v1i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a0, 2
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v1i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 2
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v1i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 2
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v1i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 2
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v1i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 2
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    ret
   ret <3 x i1> <i1 0, i1 1, i1 0>
 }
 
@@ -85,6 +380,48 @@ define <3 x i1> @buildvec_mask_optsize_v1i1() optsize {
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_optsize_v1i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a0, 2
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_optsize_v1i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a0, 2
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_optsize_v1i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 2
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_optsize_v1i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 2
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_optsize_v1i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 2
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_optsize_v1i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 2
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    ret
   ret <3 x i1> <i1 0, i1 1, i1 0>
 }
 
@@ -95,6 +432,48 @@ define <4 x i1> @buildvec_mask_v4i1() {
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v4i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a0, 6
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v4i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a0, 6
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v4i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 6
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v4i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 6
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v4i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 6
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v4i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 6
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    ret
   ret <4 x i1> <i1 0, i1 1, i1 1, i1 0>
 }
 
@@ -110,6 +489,78 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1(i1 %x, i1 %y) {
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_nonconst_v4i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a2, 3
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a2
+; RV32-ELEN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN32-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_nonconst_v4i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a2, 3
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a2
+; RV64-ELEN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN32-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_nonconst_v4i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a2, 3
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a2
+; RV32-ELEN16-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN16-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_nonconst_v4i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a2, 3
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a2
+; RV64-ELEN16-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN16-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_nonconst_v4i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a2, 3
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a2
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN8-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_nonconst_v4i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a2, 3
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a2
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN8-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <4 x i1> undef, i1 %x, i32 0
   %2 = insertelement <4 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <4 x i1> %2,  i1 %y, i32 2
@@ -134,6 +585,102 @@ define <4 x i1> @buildvec_mask_optsize_nonconst_v4i1(i1 %x, i1 %y) optsize {
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v4i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    addi sp, sp, -16
+; RV32-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN32-NEXT:    sb a1, 15(sp)
+; RV32-ELEN32-NEXT:    sb a1, 14(sp)
+; RV32-ELEN32-NEXT:    sb a0, 13(sp)
+; RV32-ELEN32-NEXT:    sb a0, 12(sp)
+; RV32-ELEN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    addi a0, sp, 12
+; RV32-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    addi sp, sp, 16
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v4i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    addi sp, sp, -16
+; RV64-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN32-NEXT:    sb a1, 15(sp)
+; RV64-ELEN32-NEXT:    sb a1, 14(sp)
+; RV64-ELEN32-NEXT:    sb a0, 13(sp)
+; RV64-ELEN32-NEXT:    sb a0, 12(sp)
+; RV64-ELEN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    addi a0, sp, 12
+; RV64-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    addi sp, sp, 16
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v4i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    addi sp, sp, -16
+; RV32-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN16-NEXT:    sb a1, 15(sp)
+; RV32-ELEN16-NEXT:    sb a1, 14(sp)
+; RV32-ELEN16-NEXT:    sb a0, 13(sp)
+; RV32-ELEN16-NEXT:    sb a0, 12(sp)
+; RV32-ELEN16-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    addi a0, sp, 12
+; RV32-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    addi sp, sp, 16
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v4i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    addi sp, sp, -16
+; RV64-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN16-NEXT:    sb a1, 15(sp)
+; RV64-ELEN16-NEXT:    sb a1, 14(sp)
+; RV64-ELEN16-NEXT:    sb a0, 13(sp)
+; RV64-ELEN16-NEXT:    sb a0, 12(sp)
+; RV64-ELEN16-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    addi a0, sp, 12
+; RV64-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    addi sp, sp, 16
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v4i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    addi sp, sp, -16
+; RV32-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN8-NEXT:    sb a1, 15(sp)
+; RV32-ELEN8-NEXT:    sb a1, 14(sp)
+; RV32-ELEN8-NEXT:    sb a0, 13(sp)
+; RV32-ELEN8-NEXT:    sb a0, 12(sp)
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    addi a0, sp, 12
+; RV32-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    addi sp, sp, 16
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v4i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    addi sp, sp, -16
+; RV64-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN8-NEXT:    sb a1, 15(sp)
+; RV64-ELEN8-NEXT:    sb a1, 14(sp)
+; RV64-ELEN8-NEXT:    sb a0, 13(sp)
+; RV64-ELEN8-NEXT:    sb a0, 12(sp)
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    addi a0, sp, 12
+; RV64-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    addi sp, sp, 16
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <4 x i1> undef, i1 %x, i32 0
   %2 = insertelement <4 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <4 x i1> %2,  i1 %y, i32 2
@@ -158,6 +705,108 @@ define <4 x i1> @buildvec_mask_nonconst_v4i1_2(i1 %x, i1 %y) {
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_nonconst_v4i1_2:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    addi sp, sp, -16
+; RV32-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN32-NEXT:    sb a1, 15(sp)
+; RV32-ELEN32-NEXT:    li a1, 1
+; RV32-ELEN32-NEXT:    sb a1, 14(sp)
+; RV32-ELEN32-NEXT:    sb a0, 13(sp)
+; RV32-ELEN32-NEXT:    sb zero, 12(sp)
+; RV32-ELEN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    addi a0, sp, 12
+; RV32-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    addi sp, sp, 16
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_nonconst_v4i1_2:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    addi sp, sp, -16
+; RV64-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN32-NEXT:    sb a1, 15(sp)
+; RV64-ELEN32-NEXT:    li a1, 1
+; RV64-ELEN32-NEXT:    sb a1, 14(sp)
+; RV64-ELEN32-NEXT:    sb a0, 13(sp)
+; RV64-ELEN32-NEXT:    sb zero, 12(sp)
+; RV64-ELEN32-NEXT:    vsetivli zero, 4, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    addi a0, sp, 12
+; RV64-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    addi sp, sp, 16
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_nonconst_v4i1_2:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    addi sp, sp, -16
+; RV32-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN16-NEXT:    sb a1, 15(sp)
+; RV32-ELEN16-NEXT:    li a1, 1
+; RV32-ELEN16-NEXT:    sb a1, 14(sp)
+; RV32-ELEN16-NEXT:    sb a0, 13(sp)
+; RV32-ELEN16-NEXT:    sb zero, 12(sp)
+; RV32-ELEN16-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    addi a0, sp, 12
+; RV32-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    addi sp, sp, 16
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_nonconst_v4i1_2:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    addi sp, sp, -16
+; RV64-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN16-NEXT:    sb a1, 15(sp)
+; RV64-ELEN16-NEXT:    li a1, 1
+; RV64-ELEN16-NEXT:    sb a1, 14(sp)
+; RV64-ELEN16-NEXT:    sb a0, 13(sp)
+; RV64-ELEN16-NEXT:    sb zero, 12(sp)
+; RV64-ELEN16-NEXT:    vsetivli zero, 4, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    addi a0, sp, 12
+; RV64-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    addi sp, sp, 16
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_nonconst_v4i1_2:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    addi sp, sp, -16
+; RV32-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN8-NEXT:    sb a1, 15(sp)
+; RV32-ELEN8-NEXT:    li a1, 1
+; RV32-ELEN8-NEXT:    sb a1, 14(sp)
+; RV32-ELEN8-NEXT:    sb a0, 13(sp)
+; RV32-ELEN8-NEXT:    sb zero, 12(sp)
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    addi a0, sp, 12
+; RV32-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    addi sp, sp, 16
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_nonconst_v4i1_2:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    addi sp, sp, -16
+; RV64-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN8-NEXT:    sb a1, 15(sp)
+; RV64-ELEN8-NEXT:    li a1, 1
+; RV64-ELEN8-NEXT:    sb a1, 14(sp)
+; RV64-ELEN8-NEXT:    sb a0, 13(sp)
+; RV64-ELEN8-NEXT:    sb zero, 12(sp)
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    addi a0, sp, 12
+; RV64-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    addi sp, sp, 16
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <4 x i1> undef, i1 0, i32 0
   %2 = insertelement <4 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <4 x i1> %2,  i1  1, i32 2
@@ -172,6 +821,48 @@ define <8 x i1> @buildvec_mask_v8i1() {
 ; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v8i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a0, 182
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v8i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a0, 182
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v8i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 182
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v8i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 182
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v8i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 182
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v8i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 182
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    ret
   ret <8 x i1> <i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>
 }
 
@@ -187,6 +878,78 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_nonconst_v8i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a2, 19
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a2
+; RV32-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN32-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN32-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_nonconst_v8i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a2, 19
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e8, mf4, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a2
+; RV64-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN32-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN32-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_nonconst_v8i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a2, 19
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a2
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN16-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_nonconst_v8i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a2, 19
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a2
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN16-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_nonconst_v8i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a2, 19
+; RV32-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a2
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.v.x v8, a1
+; RV32-ELEN8-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_nonconst_v8i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a2, 19
+; RV64-ELEN8-NEXT:    vsetivli zero, 1, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a2
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.v.x v8, a1
+; RV64-ELEN8-NEXT:    vmerge.vxm v8, v8, a0, v0
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <8 x i1> undef, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1 %y, i32 2
@@ -219,6 +982,132 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_nonconst_v8i1_2:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    addi sp, sp, -16
+; RV32-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN32-NEXT:    sb a2, 15(sp)
+; RV32-ELEN32-NEXT:    sb zero, 14(sp)
+; RV32-ELEN32-NEXT:    sb a3, 13(sp)
+; RV32-ELEN32-NEXT:    sb a0, 12(sp)
+; RV32-ELEN32-NEXT:    sb a1, 11(sp)
+; RV32-ELEN32-NEXT:    li a1, 1
+; RV32-ELEN32-NEXT:    sb a1, 10(sp)
+; RV32-ELEN32-NEXT:    sb a0, 9(sp)
+; RV32-ELEN32-NEXT:    sb a0, 8(sp)
+; RV32-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN32-NEXT:    addi a0, sp, 8
+; RV32-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    addi sp, sp, 16
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_nonconst_v8i1_2:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    addi sp, sp, -16
+; RV64-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN32-NEXT:    sb a2, 15(sp)
+; RV64-ELEN32-NEXT:    sb zero, 14(sp)
+; RV64-ELEN32-NEXT:    sb a3, 13(sp)
+; RV64-ELEN32-NEXT:    sb a0, 12(sp)
+; RV64-ELEN32-NEXT:    sb a1, 11(sp)
+; RV64-ELEN32-NEXT:    li a1, 1
+; RV64-ELEN32-NEXT:    sb a1, 10(sp)
+; RV64-ELEN32-NEXT:    sb a0, 9(sp)
+; RV64-ELEN32-NEXT:    sb a0, 8(sp)
+; RV64-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN32-NEXT:    addi a0, sp, 8
+; RV64-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    addi sp, sp, 16
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_nonconst_v8i1_2:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    addi sp, sp, -16
+; RV32-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN16-NEXT:    sb a2, 15(sp)
+; RV32-ELEN16-NEXT:    sb zero, 14(sp)
+; RV32-ELEN16-NEXT:    sb a3, 13(sp)
+; RV32-ELEN16-NEXT:    sb a0, 12(sp)
+; RV32-ELEN16-NEXT:    sb a1, 11(sp)
+; RV32-ELEN16-NEXT:    li a1, 1
+; RV32-ELEN16-NEXT:    sb a1, 10(sp)
+; RV32-ELEN16-NEXT:    sb a0, 9(sp)
+; RV32-ELEN16-NEXT:    sb a0, 8(sp)
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    addi a0, sp, 8
+; RV32-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    addi sp, sp, 16
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_nonconst_v8i1_2:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    addi sp, sp, -16
+; RV64-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN16-NEXT:    sb a2, 15(sp)
+; RV64-ELEN16-NEXT:    sb zero, 14(sp)
+; RV64-ELEN16-NEXT:    sb a3, 13(sp)
+; RV64-ELEN16-NEXT:    sb a0, 12(sp)
+; RV64-ELEN16-NEXT:    sb a1, 11(sp)
+; RV64-ELEN16-NEXT:    li a1, 1
+; RV64-ELEN16-NEXT:    sb a1, 10(sp)
+; RV64-ELEN16-NEXT:    sb a0, 9(sp)
+; RV64-ELEN16-NEXT:    sb a0, 8(sp)
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    addi a0, sp, 8
+; RV64-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    addi sp, sp, 16
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_nonconst_v8i1_2:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    addi sp, sp, -16
+; RV32-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN8-NEXT:    sb a2, 15(sp)
+; RV32-ELEN8-NEXT:    sb zero, 14(sp)
+; RV32-ELEN8-NEXT:    sb a3, 13(sp)
+; RV32-ELEN8-NEXT:    sb a0, 12(sp)
+; RV32-ELEN8-NEXT:    sb a1, 11(sp)
+; RV32-ELEN8-NEXT:    li a1, 1
+; RV32-ELEN8-NEXT:    sb a1, 10(sp)
+; RV32-ELEN8-NEXT:    sb a0, 9(sp)
+; RV32-ELEN8-NEXT:    sb a0, 8(sp)
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    addi a0, sp, 8
+; RV32-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    addi sp, sp, 16
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_nonconst_v8i1_2:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    addi sp, sp, -16
+; RV64-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN8-NEXT:    sb a2, 15(sp)
+; RV64-ELEN8-NEXT:    sb zero, 14(sp)
+; RV64-ELEN8-NEXT:    sb a3, 13(sp)
+; RV64-ELEN8-NEXT:    sb a0, 12(sp)
+; RV64-ELEN8-NEXT:    sb a1, 11(sp)
+; RV64-ELEN8-NEXT:    li a1, 1
+; RV64-ELEN8-NEXT:    sb a1, 10(sp)
+; RV64-ELEN8-NEXT:    sb a0, 9(sp)
+; RV64-ELEN8-NEXT:    sb a0, 8(sp)
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    addi a0, sp, 8
+; RV64-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    addi sp, sp, 16
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <8 x i1> undef, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1  1, i32 2
@@ -251,6 +1140,132 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    addi sp, sp, -16
+; RV32-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN32-NEXT:    sb a2, 15(sp)
+; RV32-ELEN32-NEXT:    sb zero, 14(sp)
+; RV32-ELEN32-NEXT:    sb a3, 13(sp)
+; RV32-ELEN32-NEXT:    sb a0, 12(sp)
+; RV32-ELEN32-NEXT:    sb a1, 11(sp)
+; RV32-ELEN32-NEXT:    li a1, 1
+; RV32-ELEN32-NEXT:    sb a1, 10(sp)
+; RV32-ELEN32-NEXT:    sb a0, 9(sp)
+; RV32-ELEN32-NEXT:    sb a0, 8(sp)
+; RV32-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN32-NEXT:    addi a0, sp, 8
+; RV32-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    addi sp, sp, 16
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    addi sp, sp, -16
+; RV64-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN32-NEXT:    sb a2, 15(sp)
+; RV64-ELEN32-NEXT:    sb zero, 14(sp)
+; RV64-ELEN32-NEXT:    sb a3, 13(sp)
+; RV64-ELEN32-NEXT:    sb a0, 12(sp)
+; RV64-ELEN32-NEXT:    sb a1, 11(sp)
+; RV64-ELEN32-NEXT:    li a1, 1
+; RV64-ELEN32-NEXT:    sb a1, 10(sp)
+; RV64-ELEN32-NEXT:    sb a0, 9(sp)
+; RV64-ELEN32-NEXT:    sb a0, 8(sp)
+; RV64-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN32-NEXT:    addi a0, sp, 8
+; RV64-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    addi sp, sp, 16
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    addi sp, sp, -16
+; RV32-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN16-NEXT:    sb a2, 15(sp)
+; RV32-ELEN16-NEXT:    sb zero, 14(sp)
+; RV32-ELEN16-NEXT:    sb a3, 13(sp)
+; RV32-ELEN16-NEXT:    sb a0, 12(sp)
+; RV32-ELEN16-NEXT:    sb a1, 11(sp)
+; RV32-ELEN16-NEXT:    li a1, 1
+; RV32-ELEN16-NEXT:    sb a1, 10(sp)
+; RV32-ELEN16-NEXT:    sb a0, 9(sp)
+; RV32-ELEN16-NEXT:    sb a0, 8(sp)
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    addi a0, sp, 8
+; RV32-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    addi sp, sp, 16
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    addi sp, sp, -16
+; RV64-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN16-NEXT:    sb a2, 15(sp)
+; RV64-ELEN16-NEXT:    sb zero, 14(sp)
+; RV64-ELEN16-NEXT:    sb a3, 13(sp)
+; RV64-ELEN16-NEXT:    sb a0, 12(sp)
+; RV64-ELEN16-NEXT:    sb a1, 11(sp)
+; RV64-ELEN16-NEXT:    li a1, 1
+; RV64-ELEN16-NEXT:    sb a1, 10(sp)
+; RV64-ELEN16-NEXT:    sb a0, 9(sp)
+; RV64-ELEN16-NEXT:    sb a0, 8(sp)
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    addi a0, sp, 8
+; RV64-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    addi sp, sp, 16
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    addi sp, sp, -16
+; RV32-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN8-NEXT:    sb a2, 15(sp)
+; RV32-ELEN8-NEXT:    sb zero, 14(sp)
+; RV32-ELEN8-NEXT:    sb a3, 13(sp)
+; RV32-ELEN8-NEXT:    sb a0, 12(sp)
+; RV32-ELEN8-NEXT:    sb a1, 11(sp)
+; RV32-ELEN8-NEXT:    li a1, 1
+; RV32-ELEN8-NEXT:    sb a1, 10(sp)
+; RV32-ELEN8-NEXT:    sb a0, 9(sp)
+; RV32-ELEN8-NEXT:    sb a0, 8(sp)
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    addi a0, sp, 8
+; RV32-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    addi sp, sp, 16
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v8i1_2:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    addi sp, sp, -16
+; RV64-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN8-NEXT:    sb a2, 15(sp)
+; RV64-ELEN8-NEXT:    sb zero, 14(sp)
+; RV64-ELEN8-NEXT:    sb a3, 13(sp)
+; RV64-ELEN8-NEXT:    sb a0, 12(sp)
+; RV64-ELEN8-NEXT:    sb a1, 11(sp)
+; RV64-ELEN8-NEXT:    li a1, 1
+; RV64-ELEN8-NEXT:    sb a1, 10(sp)
+; RV64-ELEN8-NEXT:    sb a0, 9(sp)
+; RV64-ELEN8-NEXT:    sb a0, 8(sp)
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    addi a0, sp, 8
+; RV64-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    addi sp, sp, 16
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <8 x i1> undef, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1  1, i32 2
@@ -282,6 +1297,126 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v8i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    addi sp, sp, -16
+; RV32-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN32-NEXT:    sb a1, 15(sp)
+; RV32-ELEN32-NEXT:    sb a1, 14(sp)
+; RV32-ELEN32-NEXT:    sb a1, 13(sp)
+; RV32-ELEN32-NEXT:    sb a0, 12(sp)
+; RV32-ELEN32-NEXT:    sb a1, 11(sp)
+; RV32-ELEN32-NEXT:    sb a1, 10(sp)
+; RV32-ELEN32-NEXT:    sb a0, 9(sp)
+; RV32-ELEN32-NEXT:    sb a0, 8(sp)
+; RV32-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN32-NEXT:    addi a0, sp, 8
+; RV32-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN32-NEXT:    addi sp, sp, 16
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_optsize_nonconst_v8i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    addi sp, sp, -16
+; RV64-ELEN32-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN32-NEXT:    sb a1, 15(sp)
+; RV64-ELEN32-NEXT:    sb a1, 14(sp)
+; RV64-ELEN32-NEXT:    sb a1, 13(sp)
+; RV64-ELEN32-NEXT:    sb a0, 12(sp)
+; RV64-ELEN32-NEXT:    sb a1, 11(sp)
+; RV64-ELEN32-NEXT:    sb a1, 10(sp)
+; RV64-ELEN32-NEXT:    sb a0, 9(sp)
+; RV64-ELEN32-NEXT:    sb a0, 8(sp)
+; RV64-ELEN32-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN32-NEXT:    addi a0, sp, 8
+; RV64-ELEN32-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN32-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN32-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN32-NEXT:    addi sp, sp, 16
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v8i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    addi sp, sp, -16
+; RV32-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN16-NEXT:    sb a1, 15(sp)
+; RV32-ELEN16-NEXT:    sb a1, 14(sp)
+; RV32-ELEN16-NEXT:    sb a1, 13(sp)
+; RV32-ELEN16-NEXT:    sb a0, 12(sp)
+; RV32-ELEN16-NEXT:    sb a1, 11(sp)
+; RV32-ELEN16-NEXT:    sb a1, 10(sp)
+; RV32-ELEN16-NEXT:    sb a0, 9(sp)
+; RV32-ELEN16-NEXT:    sb a0, 8(sp)
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-ELEN16-NEXT:    addi a0, sp, 8
+; RV32-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN16-NEXT:    addi sp, sp, 16
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_optsize_nonconst_v8i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    addi sp, sp, -16
+; RV64-ELEN16-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN16-NEXT:    sb a1, 15(sp)
+; RV64-ELEN16-NEXT:    sb a1, 14(sp)
+; RV64-ELEN16-NEXT:    sb a1, 13(sp)
+; RV64-ELEN16-NEXT:    sb a0, 12(sp)
+; RV64-ELEN16-NEXT:    sb a1, 11(sp)
+; RV64-ELEN16-NEXT:    sb a1, 10(sp)
+; RV64-ELEN16-NEXT:    sb a0, 9(sp)
+; RV64-ELEN16-NEXT:    sb a0, 8(sp)
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-ELEN16-NEXT:    addi a0, sp, 8
+; RV64-ELEN16-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN16-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN16-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN16-NEXT:    addi sp, sp, 16
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v8i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    addi sp, sp, -16
+; RV32-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV32-ELEN8-NEXT:    sb a1, 15(sp)
+; RV32-ELEN8-NEXT:    sb a1, 14(sp)
+; RV32-ELEN8-NEXT:    sb a1, 13(sp)
+; RV32-ELEN8-NEXT:    sb a0, 12(sp)
+; RV32-ELEN8-NEXT:    sb a1, 11(sp)
+; RV32-ELEN8-NEXT:    sb a1, 10(sp)
+; RV32-ELEN8-NEXT:    sb a0, 9(sp)
+; RV32-ELEN8-NEXT:    sb a0, 8(sp)
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    addi a0, sp, 8
+; RV32-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV32-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV32-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV32-ELEN8-NEXT:    addi sp, sp, 16
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_optsize_nonconst_v8i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    addi sp, sp, -16
+; RV64-ELEN8-NEXT:    .cfi_def_cfa_offset 16
+; RV64-ELEN8-NEXT:    sb a1, 15(sp)
+; RV64-ELEN8-NEXT:    sb a1, 14(sp)
+; RV64-ELEN8-NEXT:    sb a1, 13(sp)
+; RV64-ELEN8-NEXT:    sb a0, 12(sp)
+; RV64-ELEN8-NEXT:    sb a1, 11(sp)
+; RV64-ELEN8-NEXT:    sb a1, 10(sp)
+; RV64-ELEN8-NEXT:    sb a0, 9(sp)
+; RV64-ELEN8-NEXT:    sb a0, 8(sp)
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    addi a0, sp, 8
+; RV64-ELEN8-NEXT:    vle8.v v8, (a0)
+; RV64-ELEN8-NEXT:    vand.vi v8, v8, 1
+; RV64-ELEN8-NEXT:    vmsne.vi v0, v8, 0
+; RV64-ELEN8-NEXT:    addi sp, sp, 16
+; RV64-ELEN8-NEXT:    ret
   %1 = insertelement <8 x i1> undef, i1 %x, i32 0
   %2 = insertelement <8 x i1> %1,  i1 %x, i32 1
   %3 = insertelement <8 x i1> %2,  i1 %y, i32 2
@@ -300,6 +1435,56 @@ define <10 x i1> @buildvec_mask_v10i1() {
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v10i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a0, 949
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v10i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a0, 949
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v10i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 949
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v10i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 949
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v10i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 3
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    li a0, 181
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v10i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 3
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    li a0, 181
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN8-NEXT:    ret
   ret <10 x i1> <i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1>
 }
 
@@ -319,6 +1504,58 @@ define <16 x i1> @buildvec_mask_v16i1() {
 ; CHECK-RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-RV64-NEXT:    vmv.s.x v0, a0
 ; CHECK-RV64-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v16i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    lui a0, 11
+; RV32-ELEN32-NEXT:    addi a0, a0, 1718
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v16i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    lui a0, 11
+; RV64-ELEN32-NEXT:    addiw a0, a0, 1718
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v16i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    lui a0, 11
+; RV32-ELEN16-NEXT:    addi a0, a0, 1718
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v16i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    lui a0, 11
+; RV64-ELEN16-NEXT:    addiw a0, a0, 1718
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v16i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 182
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vmv.v.v v0, v8
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v16i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 182
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vmv.v.v v0, v8
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN8-NEXT:    ret
   ret <16 x i1> <i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>
 }
 
@@ -329,6 +1566,56 @@ define <16 x i1> @buildvec_mask_v16i1_undefs() {
 ; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vmv.s.x v0, a0
 ; CHECK-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v16i1_undefs:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    li a0, 1722
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v16i1_undefs:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    li a0, 1722
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e16, mf2, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v16i1_undefs:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 1722
+; RV32-ELEN16-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v16i1_undefs:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 1722
+; RV64-ELEN16-NEXT:    vsetivli zero, 1, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v16i1_undefs:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 6
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    li a0, 186
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v16i1_undefs:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 6
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    li a0, 186
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN8-NEXT:    ret
   ret <16 x i1> <i1 undef, i1 1, i1 undef, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 undef, i1 undef, i1 undef, i1 undef, i1 undef>
 }
 
@@ -400,6 +1687,82 @@ define <32 x i1> @buildvec_mask_v32i1() {
 ; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e32, mf2, ta, mu
 ; RV64-LMULMAX8-NEXT:    vmv.s.x v0, a0
 ; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v32i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    lui a0, 748384
+; RV32-ELEN32-NEXT:    addi a0, a0, 1776
+; RV32-ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v32i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    lui a0, 748384
+; RV64-ELEN32-NEXT:    addiw a0, a0, 1776
+; RV64-ELEN32-NEXT:    vsetivli zero, 1, e32, m1, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v32i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 1776
+; RV32-ELEN16-NEXT:    vsetivli zero, 2, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    lui a0, 11
+; RV32-ELEN16-NEXT:    addi a0, a0, 1718
+; RV32-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN16-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v32i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 1776
+; RV64-ELEN16-NEXT:    vsetivli zero, 2, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    lui a0, 11
+; RV64-ELEN16-NEXT:    addiw a0, a0, 1718
+; RV64-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN16-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v32i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 6
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    li a0, 240
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN8-NEXT:    li a0, 182
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 2
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 3
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v32i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 6
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    li a0, 240
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN8-NEXT:    li a0, 182
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 2
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 3
+; RV64-ELEN8-NEXT:    ret
   ret <32 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>
 }
 
@@ -495,6 +1858,136 @@ define <64 x i1> @buildvec_mask_v64i1() {
 ; RV64-LMULMAX8-NEXT:    vsetivli zero, 1, e64, m1, ta, mu
 ; RV64-LMULMAX8-NEXT:    vmv.s.x v0, a0
 ; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v64i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    lui a0, 748388
+; RV32-ELEN32-NEXT:    addi a0, a0, -1793
+; RV32-ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN32-NEXT:    lui a0, 748384
+; RV32-ELEN32-NEXT:    addi a0, a0, 1776
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
+; RV32-ELEN32-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v64i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    lui a0, 748388
+; RV64-ELEN32-NEXT:    addiw a0, a0, -1793
+; RV64-ELEN32-NEXT:    vsetivli zero, 2, e32, m1, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN32-NEXT:    lui a0, 748384
+; RV64-ELEN32-NEXT:    addiw a0, a0, 1776
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
+; RV64-ELEN32-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v64i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 1776
+; RV32-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN16-NEXT:    lui a0, 11
+; RV32-ELEN16-NEXT:    addi a0, a0, 1718
+; RV32-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN16-NEXT:    vsetivli zero, 2, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN16-NEXT:    lui a0, 4
+; RV32-ELEN16-NEXT:    addi a0, a0, -1793
+; RV32-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v9, a0
+; RV32-ELEN16-NEXT:    vsetivli zero, 3, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v9, 2
+; RV32-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v8, 3
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v64i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 1776
+; RV64-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN16-NEXT:    lui a0, 11
+; RV64-ELEN16-NEXT:    addiw a0, a0, 1718
+; RV64-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN16-NEXT:    vsetivli zero, 2, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN16-NEXT:    lui a0, 4
+; RV64-ELEN16-NEXT:    addiw a0, a0, -1793
+; RV64-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v9, a0
+; RV64-ELEN16-NEXT:    vsetivli zero, 3, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v9, 2
+; RV64-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v8, 3
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v64i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 6
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    li a0, 240
+; RV32-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN8-NEXT:    li a0, 182
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 2
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 3
+; RV32-ELEN8-NEXT:    li a0, 255
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v9, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 5, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v9, 4
+; RV32-ELEN8-NEXT:    li a0, 56
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v9, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v9, 5
+; RV32-ELEN8-NEXT:    vsetivli zero, 7, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 6
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 7
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v64i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 6
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    li a0, 240
+; RV64-ELEN8-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN8-NEXT:    li a0, 182
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 2
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 3
+; RV64-ELEN8-NEXT:    li a0, 255
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v9, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 5, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v9, 4
+; RV64-ELEN8-NEXT:    li a0, 56
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v9, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v9, 5
+; RV64-ELEN8-NEXT:    vsetivli zero, 7, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 6
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 7
+; RV64-ELEN8-NEXT:    ret
   ret <64 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1>
 }
 
@@ -647,6 +2140,246 @@ define <128 x i1> @buildvec_mask_v128i1() {
 ; RV64-LMULMAX8-NEXT:    vsetvli zero, zero, e64, m1, tu, mu
 ; RV64-LMULMAX8-NEXT:    vslideup.vi v0, v8, 1
 ; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-ELEN32-LABEL: buildvec_mask_v128i1:
+; RV32-ELEN32:       # %bb.0:
+; RV32-ELEN32-NEXT:    lui a0, 748388
+; RV32-ELEN32-NEXT:    addi a0, a0, -1793
+; RV32-ELEN32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN32-NEXT:    lui a0, 748384
+; RV32-ELEN32-NEXT:    addi a0, a0, 1776
+; RV32-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV32-ELEN32-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; RV32-ELEN32-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN32-NEXT:    lui a0, 551776
+; RV32-ELEN32-NEXT:    addi a0, a0, 1776
+; RV32-ELEN32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN32-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; RV32-ELEN32-NEXT:    vslideup.vi v0, v8, 2
+; RV32-ELEN32-NEXT:    lui a0, 945060
+; RV32-ELEN32-NEXT:    addi a0, a0, -1793
+; RV32-ELEN32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; RV32-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
+; RV32-ELEN32-NEXT:    vslideup.vi v0, v8, 3
+; RV32-ELEN32-NEXT:    ret
+;
+; RV64-ELEN32-LABEL: buildvec_mask_v128i1:
+; RV64-ELEN32:       # %bb.0:
+; RV64-ELEN32-NEXT:    lui a0, 748388
+; RV64-ELEN32-NEXT:    addiw a0, a0, -1793
+; RV64-ELEN32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN32-NEXT:    lui a0, 748384
+; RV64-ELEN32-NEXT:    addiw a0, a0, 1776
+; RV64-ELEN32-NEXT:    vmv.s.x v0, a0
+; RV64-ELEN32-NEXT:    vsetivli zero, 2, e32, m1, tu, mu
+; RV64-ELEN32-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN32-NEXT:    lui a0, 551776
+; RV64-ELEN32-NEXT:    addiw a0, a0, 1776
+; RV64-ELEN32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN32-NEXT:    vsetivli zero, 3, e32, m1, tu, mu
+; RV64-ELEN32-NEXT:    vslideup.vi v0, v8, 2
+; RV64-ELEN32-NEXT:    lui a0, 945060
+; RV64-ELEN32-NEXT:    addiw a0, a0, -1793
+; RV64-ELEN32-NEXT:    vsetivli zero, 4, e32, m1, ta, mu
+; RV64-ELEN32-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN32-NEXT:    vsetvli zero, zero, e32, m1, tu, mu
+; RV64-ELEN32-NEXT:    vslideup.vi v0, v8, 3
+; RV64-ELEN32-NEXT:    ret
+;
+; RV32-ELEN16-LABEL: buildvec_mask_v128i1:
+; RV32-ELEN16:       # %bb.0:
+; RV32-ELEN16-NEXT:    li a0, 1776
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN16-NEXT:    lui a0, 11
+; RV32-ELEN16-NEXT:    addi a0, a0, 1718
+; RV32-ELEN16-NEXT:    vmv.s.x v9, a0
+; RV32-ELEN16-NEXT:    vsetivli zero, 2, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vmv1r.v v0, v8
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v9, 1
+; RV32-ELEN16-NEXT:    lui a0, 4
+; RV32-ELEN16-NEXT:    addi a0, a0, -1793
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v10, a0
+; RV32-ELEN16-NEXT:    vsetivli zero, 3, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v10, 2
+; RV32-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v9, 3
+; RV32-ELEN16-NEXT:    vsetivli zero, 5, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v8, 4
+; RV32-ELEN16-NEXT:    lui a0, 8
+; RV32-ELEN16-NEXT:    addi a0, a0, 1718
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN16-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v8, 5
+; RV32-ELEN16-NEXT:    vsetivli zero, 7, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v10, 6
+; RV32-ELEN16-NEXT:    lui a0, 14
+; RV32-ELEN16-NEXT:    addi a0, a0, 1722
+; RV32-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV32-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN16-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
+; RV32-ELEN16-NEXT:    vslideup.vi v0, v8, 7
+; RV32-ELEN16-NEXT:    ret
+;
+; RV64-ELEN16-LABEL: buildvec_mask_v128i1:
+; RV64-ELEN16:       # %bb.0:
+; RV64-ELEN16-NEXT:    li a0, 1776
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN16-NEXT:    lui a0, 11
+; RV64-ELEN16-NEXT:    addiw a0, a0, 1718
+; RV64-ELEN16-NEXT:    vmv.s.x v9, a0
+; RV64-ELEN16-NEXT:    vsetivli zero, 2, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vmv1r.v v0, v8
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v9, 1
+; RV64-ELEN16-NEXT:    lui a0, 4
+; RV64-ELEN16-NEXT:    addiw a0, a0, -1793
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v10, a0
+; RV64-ELEN16-NEXT:    vsetivli zero, 3, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v10, 2
+; RV64-ELEN16-NEXT:    vsetivli zero, 4, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v9, 3
+; RV64-ELEN16-NEXT:    vsetivli zero, 5, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v8, 4
+; RV64-ELEN16-NEXT:    lui a0, 8
+; RV64-ELEN16-NEXT:    addiw a0, a0, 1718
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN16-NEXT:    vsetivli zero, 6, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v8, 5
+; RV64-ELEN16-NEXT:    vsetivli zero, 7, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v10, 6
+; RV64-ELEN16-NEXT:    lui a0, 14
+; RV64-ELEN16-NEXT:    addiw a0, a0, 1722
+; RV64-ELEN16-NEXT:    vsetivli zero, 8, e16, m1, ta, mu
+; RV64-ELEN16-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN16-NEXT:    vsetvli zero, zero, e16, m1, tu, mu
+; RV64-ELEN16-NEXT:    vslideup.vi v0, v8, 7
+; RV64-ELEN16-NEXT:    ret
+;
+; RV32-ELEN8-LABEL: buildvec_mask_v128i1:
+; RV32-ELEN8:       # %bb.0:
+; RV32-ELEN8-NEXT:    li a0, 6
+; RV32-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    li a0, 240
+; RV32-ELEN8-NEXT:    vmv.s.x v9, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vmv1r.v v0, v9
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV32-ELEN8-NEXT:    li a0, 182
+; RV32-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v10, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v10, 2
+; RV32-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v10, 3
+; RV32-ELEN8-NEXT:    li a0, 255
+; RV32-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v11, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 5, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v11, 4
+; RV32-ELEN8-NEXT:    li a0, 56
+; RV32-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v12, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v12, 5
+; RV32-ELEN8-NEXT:    vsetivli zero, 7, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v10, 6
+; RV32-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v10, 7
+; RV32-ELEN8-NEXT:    vsetivli zero, 9, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v9, 8
+; RV32-ELEN8-NEXT:    vsetivli zero, 10, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 9
+; RV32-ELEN8-NEXT:    vsetivli zero, 11, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v10, 10
+; RV32-ELEN8-NEXT:    li a0, 134
+; RV32-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 11
+; RV32-ELEN8-NEXT:    vsetivli zero, 13, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v11, 12
+; RV32-ELEN8-NEXT:    vsetivli zero, 14, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v12, 13
+; RV32-ELEN8-NEXT:    li a0, 186
+; RV32-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    vsetivli zero, 15, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 14
+; RV32-ELEN8-NEXT:    li a0, 230
+; RV32-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV32-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV32-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV32-ELEN8-NEXT:    vslideup.vi v0, v8, 15
+; RV32-ELEN8-NEXT:    ret
+;
+; RV64-ELEN8-LABEL: buildvec_mask_v128i1:
+; RV64-ELEN8:       # %bb.0:
+; RV64-ELEN8-NEXT:    li a0, 6
+; RV64-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    li a0, 240
+; RV64-ELEN8-NEXT:    vmv.s.x v9, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 2, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vmv1r.v v0, v9
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 1
+; RV64-ELEN8-NEXT:    li a0, 182
+; RV64-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v10, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 3, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v10, 2
+; RV64-ELEN8-NEXT:    vsetivli zero, 4, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v10, 3
+; RV64-ELEN8-NEXT:    li a0, 255
+; RV64-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v11, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 5, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v11, 4
+; RV64-ELEN8-NEXT:    li a0, 56
+; RV64-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v12, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 6, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v12, 5
+; RV64-ELEN8-NEXT:    vsetivli zero, 7, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v10, 6
+; RV64-ELEN8-NEXT:    vsetivli zero, 8, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v10, 7
+; RV64-ELEN8-NEXT:    vsetivli zero, 9, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v9, 8
+; RV64-ELEN8-NEXT:    vsetivli zero, 10, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 9
+; RV64-ELEN8-NEXT:    vsetivli zero, 11, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v10, 10
+; RV64-ELEN8-NEXT:    li a0, 134
+; RV64-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 12, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 11
+; RV64-ELEN8-NEXT:    vsetivli zero, 13, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v11, 12
+; RV64-ELEN8-NEXT:    vsetivli zero, 14, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v12, 13
+; RV64-ELEN8-NEXT:    li a0, 186
+; RV64-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    vsetivli zero, 15, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 14
+; RV64-ELEN8-NEXT:    li a0, 230
+; RV64-ELEN8-NEXT:    vsetivli zero, 16, e8, m1, ta, mu
+; RV64-ELEN8-NEXT:    vmv.s.x v8, a0
+; RV64-ELEN8-NEXT:    vsetvli zero, zero, e8, m1, tu, mu
+; RV64-ELEN8-NEXT:    vslideup.vi v0, v8, 15
+; RV64-ELEN8-NEXT:    ret
   ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>
 }
 
@@ -769,5 +2502,23 @@ define <128 x i1> @buildvec_mask_optsize_v128i1() optsize {
 ; RV64-LMULMAX8-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; RV64-LMULMAX8-NEXT:    vlm.v v0, (a0)
 ; RV64-LMULMAX8-NEXT:    ret
+;
+; RV32-ELEN-LABEL: buildvec_mask_optsize_v128i1:
+; RV32-ELEN:       # %bb.0:
+; RV32-ELEN-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV32-ELEN-NEXT:    addi a0, a0, %lo(.LCPI21_0)
+; RV32-ELEN-NEXT:    li a1, 128
+; RV32-ELEN-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV32-ELEN-NEXT:    vlm.v v0, (a0)
+; RV32-ELEN-NEXT:    ret
+;
+; RV64-ELEN-LABEL: buildvec_mask_optsize_v128i1:
+; RV64-ELEN:       # %bb.0:
+; RV64-ELEN-NEXT:    lui a0, %hi(.LCPI21_0)
+; RV64-ELEN-NEXT:    addi a0, a0, %lo(.LCPI21_0)
+; RV64-ELEN-NEXT:    li a1, 128
+; RV64-ELEN-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; RV64-ELEN-NEXT:    vlm.v v0, (a0)
+; RV64-ELEN-NEXT:    ret
   ret <128 x i1> <i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 0, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 1, i1 1, i1 0, i1 0, i1 0, i1 1, i1 0, i1 1, i1 1, i1 1, i1 0, i1 1, i1 0, i1 1, i1 1, i1 0, i1 0, i1 1, i1 1, i1 1>
 }
